From c11f3297208a3acb7532897ab5f5b6bcceaa1c6c Mon Sep 17 00:00:00 2001
From: Moritz Mack
Date: Fri, 12 Jan 2024 16:16:27 +0100
Subject: [PATCH 01/95] Temporarily tolerate tracing.apm.agent.global_labels.XYZ settings (#104317)

---
 .../elasticsearch/telemetry/apm/internal/APMAgentSettings.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
index 12e81e7ae78e1..f2e6b6372c267 100644
--- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
+++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/APMAgentSettings.java
@@ -228,7 +228,7 @@ public void setAgentSetting(String key, String value) {
         return new Setting<>(qualifiedKey, "", (value) -> {
             if (qualifiedKey.equals("_na_") == false && PERMITTED_AGENT_KEYS.contains(key) == false) {
                 // TODO figure out why those settings are kept, these should be reformatted / removed by now
-                if (key.startsWith("global_labels.")) {
+                if (qualifiedKey.startsWith("tracing.apm.agent.global_labels.")) {
                     return value;
                 }
                 throw new IllegalArgumentException("Configuration [" + qualifiedKey + "] is either prohibited or unknown.");

From abccea00d11aef2815e48cfadf786b1ae5e50c4e Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Fri, 12 Jan 2024 16:41:37 +0100
Subject: [PATCH 02/95] Move some test-only parsing code to test modules (#104261)

None of this code needs to live in the prod classes, moving it over to
the test code where it's used.
---
 .../mustache/SearchTemplateResponse.java      |  30 ----
 .../mustache/SearchTemplateResponseTests.java |  26 +++-
 .../cluster/health/ClusterHealthResponse.java | 133 +++---------------
 .../settings/get/GetSettingsResponse.java     |  58 --------
 .../action/bulk/BulkItemResponse.java         |  99 +------------
 .../action/bulk/BulkResponse.java             |  50 +------
 .../elasticsearch/action/get/GetResponse.java |  34 -----
 .../action/ingest/GetPipelineResponse.java    |  31 ----
 .../ingest/SimulateDocumentVerboseResult.java |  19 ---
 .../action/search/ClearScrollResponse.java    |  25 +---
 .../health/ClusterHealthResponsesTests.java   |  97 ++++++++++++-
 .../get/GetSettingsResponseTests.java         |  55 +++++++-
 .../action/bulk/BulkItemResponseTests.java    |  83 ++++++++++-
 .../action/bulk/BulkResponseTests.java        |  42 +++++-
 .../action/get/GetResponseTests.java          |  20 ++-
 .../ingest/GetPipelineResponseTests.java      |  22 ++-
 .../SimulateDocumentVerboseResultTests.java   |  20 ++-
 .../search/ClearScrollResponseTests.java      |  26 +++-
 18 files changed, 403 insertions(+), 467 deletions(-)

diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
index 34e771c51e4f4..39da4066a7859 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
+++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java
@@ -14,7 +14,6 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ChunkedToXContent;
-import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.AbstractRefCounted;
 import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.rest.RestStatus;
@@ -22,14 +21,10 @@
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentFactory;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.Map;
 
 public class SearchTemplateResponse extends ActionResponse implements ToXContentObject {
     public static ParseField TEMPLATE_OUTPUT_FIELD = new ParseField("template_output");
@@ -108,31 +103,6 @@ public boolean hasReferences() {
         return refCounted.hasReferences();
     }
 
-    public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException {
-        SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse();
-        Map<String, Object> contentAsMap = parser.map();
-
-        if (contentAsMap.containsKey(TEMPLATE_OUTPUT_FIELD.getPreferredName())) {
-            Object source = contentAsMap.get(TEMPLATE_OUTPUT_FIELD.getPreferredName());
-            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).value(source);
-            searchTemplateResponse.setSource(BytesReference.bytes(builder));
-        } else {
-            XContentType contentType = parser.contentType();
-            XContentBuilder builder = XContentFactory.contentBuilder(contentType).map(contentAsMap);
-            try (
-                XContentParser searchResponseParser = XContentHelper.createParserNotCompressed(
-                    XContentParserConfiguration.EMPTY.withRegistry(parser.getXContentRegistry())
-                        .withDeprecationHandler(parser.getDeprecationHandler()),
-                    BytesReference.bytes(builder),
-                    contentType
-                )
-            ) {
-                searchTemplateResponse.setResponse(SearchResponse.fromXContent(searchResponseParser));
-            }
-        }
-        return searchTemplateResponse;
-    }
-
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java
index 73c8887669a02..fce288db66620 100644
--- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java
+++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.SearchResponseUtils;
@@ -20,9 +21,11 @@
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentFactory;
 import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
 import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
+import java.util.Map;
 import java.util.function.Predicate;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
@@ -42,7 +45,28 @@ protected SearchTemplateResponse createTestInstance() {
 
     @Override
     protected SearchTemplateResponse doParseInstance(XContentParser parser) throws IOException {
-        return SearchTemplateResponse.fromXContent(parser);
+        SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse();
+        Map<String, Object> contentAsMap = parser.map();
+
+        if (contentAsMap.containsKey(SearchTemplateResponse.TEMPLATE_OUTPUT_FIELD.getPreferredName())) {
+            Object source = contentAsMap.get(SearchTemplateResponse.TEMPLATE_OUTPUT_FIELD.getPreferredName());
+            XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).value(source);
+            searchTemplateResponse.setSource(BytesReference.bytes(builder));
+        } else {
+            XContentType contentType = parser.contentType();
+            XContentBuilder builder = XContentFactory.contentBuilder(contentType).map(contentAsMap);
+            try (
+                XContentParser searchResponseParser = XContentHelper.createParserNotCompressed(
+                    XContentParserConfiguration.EMPTY.withRegistry(parser.getXContentRegistry())
+                        .withDeprecationHandler(parser.getDeprecationHandler()),
+                    BytesReference.bytes(builder),
+                    contentType
+                )
+            ) {
+                searchTemplateResponse.setResponse(SearchResponse.fromXContent(searchResponseParser));
+            }
+        }
+        return searchTemplateResponse;
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
index 19e84e7443eed..e7e2679e84eb5 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
@@ -17,132 +17,35 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ObjectParser;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
-import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 
-import static java.util.Collections.emptyMap;
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
-
 public class ClusterHealthResponse extends ActionResponse implements ToXContentObject {
-    private static final String CLUSTER_NAME = "cluster_name";
-    private static final String STATUS = "status";
-    private static final String TIMED_OUT = "timed_out";
-    private static final String NUMBER_OF_NODES = "number_of_nodes";
-    private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
-    private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
-    private static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
-    private static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
+    static final String CLUSTER_NAME = "cluster_name";
+    static final String STATUS = "status";
+    static final String TIMED_OUT = "timed_out";
+    static final String NUMBER_OF_NODES = "number_of_nodes";
+    static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes";
+    static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks";
+    static final String NUMBER_OF_IN_FLIGHT_FETCH = "number_of_in_flight_fetch";
+    static final String DELAYED_UNASSIGNED_SHARDS = "delayed_unassigned_shards";
     private static final String TASK_MAX_WAIT_TIME_IN_QUEUE = "task_max_waiting_in_queue";
-    private static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
-    private static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
+    static final String TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS = "task_max_waiting_in_queue_millis";
+    static final String ACTIVE_SHARDS_PERCENT_AS_NUMBER = "active_shards_percent_as_number";
     private static final String ACTIVE_SHARDS_PERCENT = "active_shards_percent";
-    private static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
-    private static final String ACTIVE_SHARDS = "active_shards";
-    private static final String RELOCATING_SHARDS = "relocating_shards";
-    private static final String INITIALIZING_SHARDS = "initializing_shards";
-    private static final String UNASSIGNED_SHARDS = "unassigned_shards";
-    private static final String INDICES = "indices";
-
-    private static final ConstructingObjectParser<ClusterHealthResponse, Void> PARSER = new ConstructingObjectParser<>(
-        "cluster_health_response",
-        true,
-        parsedObjects -> {
-            int i = 0;
-            // ClusterStateHealth fields
-            int numberOfNodes = (int) parsedObjects[i++];
-            int numberOfDataNodes = (int) parsedObjects[i++];
-            int activeShards = (int) parsedObjects[i++];
-            int relocatingShards = (int) parsedObjects[i++];
-            int activePrimaryShards = (int) parsedObjects[i++];
-            int initializingShards = (int) parsedObjects[i++];
-            int unassignedShards = (int) parsedObjects[i++];
-            double activeShardsPercent = (double) parsedObjects[i++];
-            String statusStr = (String) parsedObjects[i++];
-            ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr);
-            @SuppressWarnings("unchecked")
-            List<ClusterIndexHealth> indexList = (List<ClusterIndexHealth>) parsedObjects[i++];
-            final Map<String, ClusterIndexHealth> indices;
-            if (indexList == null || indexList.isEmpty()) {
-                indices = emptyMap();
-            } else {
-                indices = Maps.newMapWithExpectedSize(indexList.size());
-                for (ClusterIndexHealth indexHealth : indexList) {
-                    indices.put(indexHealth.getIndex(), indexHealth);
-                }
-            }
-            ClusterStateHealth stateHealth = new ClusterStateHealth(
-                activePrimaryShards,
-                activeShards,
-                relocatingShards,
-                initializingShards,
-                unassignedShards,
-                numberOfNodes,
-                numberOfDataNodes,
-                activeShardsPercent,
-                status,
-                indices
-            );
-
-            // ClusterHealthResponse fields
-            String clusterName = (String) parsedObjects[i++];
-            int numberOfPendingTasks = (int) parsedObjects[i++];
-            int numberOfInFlightFetch = (int) parsedObjects[i++];
-            int delayedUnassignedShards = (int) parsedObjects[i++];
-            long taskMaxWaitingTimeMillis = (long) parsedObjects[i++];
-            boolean timedOut = (boolean) parsedObjects[i];
-            return new ClusterHealthResponse(
-                clusterName,
-                numberOfPendingTasks,
-                numberOfInFlightFetch,
-                delayedUnassignedShards,
-                TimeValue.timeValueMillis(taskMaxWaitingTimeMillis),
-                timedOut,
-                stateHealth
-            );
-        }
-    );
-
-    private static final ObjectParser.NamedObjectParser<ClusterIndexHealth, Void> INDEX_PARSER = (
-        XContentParser parser,
-        Void context,
-        String index) -> ClusterIndexHealth.innerFromXContent(parser, index);
-
-    static {
-        // ClusterStateHealth fields
-        PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES));
-        PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES));
-        PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS));
-        PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS));
-        PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS));
-        PARSER.declareInt(constructorArg(), new ParseField(INITIALIZING_SHARDS));
-        PARSER.declareInt(constructorArg(), new ParseField(UNASSIGNED_SHARDS));
-        PARSER.declareDouble(constructorArg(), new ParseField(ACTIVE_SHARDS_PERCENT_AS_NUMBER));
-        PARSER.declareString(constructorArg(), new ParseField(STATUS));
-        // Can be absent if LEVEL == 'cluster'
-        PARSER.declareNamedObjects(optionalConstructorArg(), INDEX_PARSER, new ParseField(INDICES));
-
-        // ClusterHealthResponse fields
-        PARSER.declareString(constructorArg(), new ParseField(CLUSTER_NAME));
-        PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_PENDING_TASKS));
-        PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_IN_FLIGHT_FETCH));
-        PARSER.declareInt(constructorArg(), new ParseField(DELAYED_UNASSIGNED_SHARDS));
-        PARSER.declareLong(constructorArg(), new ParseField(TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS));
-        PARSER.declareBoolean(constructorArg(), new ParseField(TIMED_OUT));
-    }
+    static final String ACTIVE_PRIMARY_SHARDS = "active_primary_shards";
+    static final String ACTIVE_SHARDS = "active_shards";
+    static final String RELOCATING_SHARDS = "relocating_shards";
+    static final String INITIALIZING_SHARDS = "initializing_shards";
+    static final String UNASSIGNED_SHARDS = "unassigned_shards";
+    static final String INDICES = "indices";
 
     private String clusterName;
     private int numberOfPendingTasks = 0;
@@ -370,10 +273,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }
 
-    public static ClusterHealthResponse fromXContent(XContentParser parser) {
-        return PARSER.apply(parser, null);
-    }
-
     @Override
     public boolean equals(Object o) {
         if (this == o) return true;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
index ec3a5f71c3a48..8a106d1b43d3e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java
@@ -15,15 +15,12 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
-import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.json.JsonXContent;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Objects;
@@ -91,61 +88,6 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeMap(indexToDefaultSettings, StreamOutput::writeWriteable);
     }
 
-    private static void parseSettingsField(
-        XContentParser parser,
-        String currentIndexName,
-        Map<String, Settings> indexToSettings,
-        Map<String, Settings> indexToDefaultSettings
-    ) throws IOException {
-
-        if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
-            switch (parser.currentName()) {
-                case "settings" -> indexToSettings.put(currentIndexName, Settings.fromXContent(parser));
-                case "defaults" -> indexToDefaultSettings.put(currentIndexName, Settings.fromXContent(parser));
-                default -> parser.skipChildren();
-            }
-        } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
-            parser.skipChildren();
-        }
-        parser.nextToken();
-    }
-
-    private static void parseIndexEntry(
-        XContentParser parser,
-        Map<String, Settings> indexToSettings,
-        Map<String, Settings> indexToDefaultSettings
-    ) throws IOException {
-        String indexName = parser.currentName();
-        parser.nextToken();
-        while (parser.isClosed() == false && parser.currentToken() != XContentParser.Token.END_OBJECT) {
-            parseSettingsField(parser, indexName, indexToSettings, indexToDefaultSettings);
-        }
-    }
-
-    public static GetSettingsResponse fromXContent(XContentParser parser) throws IOException {
-        HashMap<String, Settings> indexToSettings = new HashMap<>();
-        HashMap<String, Settings> indexToDefaultSettings = new HashMap<>();
-
-        if (parser.currentToken() == null) {
-            parser.nextToken();
-        }
-        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
-        parser.nextToken();
-
-        while (parser.isClosed() == false) {
-            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
-                // we must assume this is an index entry
-                parseIndexEntry(parser, indexToSettings, indexToDefaultSettings);
-            } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
-                parser.skipChildren();
-            } else {
-                parser.nextToken();
-            }
-        }
-
-        return new GetSettingsResponse(Map.copyOf(indexToSettings), Map.copyOf(indexToDefaultSettings));
-    }
-
     @Override
     public String toString() {
         try {
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
index c2b6c666d829a..151e8795d0f82 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java
@@ -21,26 +21,17 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentFragment;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 
-import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
-import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
-
 /**
  * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
  * of the relevant action, and if it has failed or not (with the failure message in case it failed).
@@ -49,8 +40,8 @@ public class BulkItemResponse implements Writeable, ToXContentObject {
 
     private static final String _INDEX = "_index";
     private static final String _ID = "_id";
-    private static final String STATUS = "status";
-    private static final String ERROR = "error";
+    static final String STATUS = "status";
+    static final String ERROR = "error";
 
     public RestStatus status() {
         return failure == null ? response.status() : failure.getStatus();
@@ -80,80 +71,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }
 
-    /**
-     * Reads a {@link BulkItemResponse} from a {@link XContentParser}.
-     *
-     * @param parser the {@link XContentParser}
-     * @param id the id to assign to the parsed {@link BulkItemResponse}. It is usually the index of
-     *           the item in the {@link BulkResponse#getItems} array.
-     */
-    public static BulkItemResponse fromXContent(XContentParser parser, int id) throws IOException {
-        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
-
-        XContentParser.Token token = parser.nextToken();
-        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
-
-        String currentFieldName = parser.currentName();
-        token = parser.nextToken();
-
-        final OpType opType = OpType.fromString(currentFieldName);
-        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
-
-        DocWriteResponse.Builder builder = null;
-        CheckedConsumer<XContentParser, IOException> itemParser = null;
-
-        if (opType == OpType.INDEX || opType == OpType.CREATE) {
-            final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
-            builder = indexResponseBuilder;
-            itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);
-
-        } else if (opType == OpType.UPDATE) {
-            final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
-            builder = updateResponseBuilder;
-            itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);
-
-        } else if (opType == OpType.DELETE) {
-            final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
-            builder = deleteResponseBuilder;
-            itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
-        } else {
-            throwUnknownField(currentFieldName, parser);
-        }
-
-        RestStatus status = null;
-        ElasticsearchException exception = null;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            }
-
-            if (ERROR.equals(currentFieldName)) {
-                if (token == XContentParser.Token.START_OBJECT) {
-                    exception = ElasticsearchException.fromXContent(parser);
-                }
-            } else if (STATUS.equals(currentFieldName)) {
-                if (token == XContentParser.Token.VALUE_NUMBER) {
-                    status = RestStatus.fromCode(parser.intValue());
-                }
-            } else {
-                itemParser.accept(parser);
-            }
-        }
-
-        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser);
-        token = parser.nextToken();
-        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser);
-
-        BulkItemResponse bulkItemResponse;
-        if (exception != null) {
-            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getId(), exception, status);
-            bulkItemResponse = BulkItemResponse.failure(id, opType, failure);
-        } else {
-            bulkItemResponse = BulkItemResponse.success(id, opType, builder.build());
-        }
-        return bulkItemResponse;
-    }
-
     /**
      * Represents a failure.
      */
@@ -171,18 +88,6 @@ public static class Failure implements Writeable, ToXContentFragment {
         private final long term;
         private final boolean aborted;
 
-        public static final ConstructingObjectParser<Failure, Void> PARSER = new ConstructingObjectParser<>(
-            "bulk_failures",
-            true,
-            a -> new Failure((String) a[0], (String) a[1], (Exception) a[2], RestStatus.fromCode((int) a[3]))
-        );
-        static {
-            PARSER.declareString(constructorArg(), new ParseField(INDEX_FIELD));
-            PARSER.declareString(optionalConstructorArg(), new ParseField(ID_FIELD));
-            PARSER.declareObject(constructorArg(), (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(CAUSE_FIELD));
-            PARSER.declareInt(constructorArg(), new ParseField(STATUS_FIELD));
-        }
-
         /**
          * For write failures before operation was assigned a sequence number.
         *
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
index 2065a31ce5566..111dbfb0f7af6 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java
@@ -15,16 +15,9 @@
 import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.xcontent.ToXContent;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.List;
-
-import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
-import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
-import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
 
 /**
  * A response of a bulk execution. Holding a response for each item responding (in order) of the
@@ -33,10 +26,10 @@
  */
 public class BulkResponse extends ActionResponse implements Iterable<BulkItemResponse>, ChunkedToXContentObject {
 
-    private static final String ITEMS = "items";
-    private static final String ERRORS = "errors";
-    private static final String TOOK = "took";
-    private static final String INGEST_TOOK = "ingest_took";
+    static final String ITEMS = "items";
+    static final String ERRORS = "errors";
+    static final String TOOK = "took";
+    static final String INGEST_TOOK = "ingest_took";
 
     public static final long NO_INGEST_TOOK = -1L;
@@ -133,41 +126,6 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeZLong(ingestTookInMillis);
     }
 
-    public static BulkResponse fromXContent(XContentParser parser) throws IOException {
-        XContentParser.Token token = parser.nextToken();
-        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
-
-        long took = -1L;
-        long ingestTook = NO_INGEST_TOOK;
-        List<BulkItemResponse> items = new ArrayList<>();
-
-        String currentFieldName = parser.currentName();
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if (TOOK.equals(currentFieldName)) {
-                    took = parser.longValue();
-                } else if (INGEST_TOOK.equals(currentFieldName)) {
-                    ingestTook = parser.longValue();
-                } else if (ERRORS.equals(currentFieldName) == false) {
-                    throwUnknownField(currentFieldName, parser);
-                }
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                if (ITEMS.equals(currentFieldName)) {
-                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                        items.add(BulkItemResponse.fromXContent(parser, items.size()));
-                    }
-                } else {
-                    throwUnknownField(currentFieldName, parser);
-                }
-            } else {
-                throwUnknownToken(token, parser);
-            }
-        }
-        return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook);
-    }
-
     @Override
     public Iterator<ToXContent> toXContentChunked(ToXContent.Params params) {
         return Iterators.concat(Iterators.single((builder, p) -> {
diff --git a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java
index 6871c60f11a15..5b407d0ebceb0 100644
--- a/server/src/main/java/org/elasticsearch/action/get/GetResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/get/GetResponse.java
@@ -10,7 +10,6 @@
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.document.DocumentField;
@@ -19,11 +18,9 @@
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.util.Iterator;
-import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
 
@@ -149,37 +146,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return getResult.toXContent(builder, params);
     }
 
-    /**
-     * This method can be used to parse a {@link GetResponse} object when it has been printed out
-     * as a xcontent using the {@link #toXContent(XContentBuilder, Params)} method.
-     * <p>
-     * For forward compatibility reason this method might not fail if it tries to parse a field it
-     * doesn't know. But before returning the result it will check that enough information were
-     * parsed to return a valid {@link GetResponse} instance and throws a {@link ParsingException}
-     * otherwise. This is the case when we get a 404 back, which can be parsed as a normal
-     * {@link GetResponse} with found set to false, or as an elasticsearch exception. The caller
-     * of this method needs a way to figure out whether we got back a valid get response, which
-     * can be done by catching ParsingException.
-     *
-     * @param parser {@link XContentParser} to parse the response from
-     * @return a {@link GetResponse}
-     * @throws IOException is an I/O exception occurs during the parsing
-     */
-    public static GetResponse fromXContent(XContentParser parser) throws IOException {
-        GetResult getResult = GetResult.fromXContent(parser);
-
-        // At this stage we ensure that we parsed enough information to return
-        // a valid GetResponse instance. If it's not the case, we throw an
-        // exception so that callers know it and can handle it correctly.
-        if (getResult.getIndex() == null && getResult.getId() == null) {
-            throw new ParsingException(
-                parser.getTokenLocation(),
-                String.format(Locale.ROOT, "Missing required fields [%s,%s]", GetResult._INDEX, GetResult._ID)
-            );
-        }
-        return new GetResponse(getResult);
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         getResult.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java
index bc9c88a706f30..c685a49cddf2f 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/GetPipelineResponse.java
@@ -10,15 +10,12 @@
 
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.ingest.PipelineConfiguration;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
-import org.elasticsearch.xcontent.XContentParser.Token;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -27,8 +24,6 @@
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
-
 public class GetPipelineResponse extends ActionResponse implements ToXContentObject {
 
     private final List<PipelineConfiguration> pipelines;
@@ -90,32 +85,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }
 
-    /**
-     *
-     * @param parser the parser for the XContent that contains the serialized GetPipelineResponse.
-     * @return an instance of GetPipelineResponse read from the parser
-     * @throws IOException If the parsing fails
-     */
-    public static GetPipelineResponse fromXContent(XContentParser parser) throws IOException {
-        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
-        List<PipelineConfiguration> pipelines = new ArrayList<>();
-        while (parser.nextToken().equals(Token.FIELD_NAME)) {
-            String pipelineId = parser.currentName();
-            parser.nextToken();
-            try (XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent())) {
-                contentBuilder.generator().copyCurrentStructure(parser);
-                PipelineConfiguration pipeline = new PipelineConfiguration(
-                    pipelineId,
-                    BytesReference.bytes(contentBuilder),
-                    contentBuilder.contentType()
-                );
-                pipelines.add(pipeline);
-            }
-        }
-        ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser);
-        return new GetPipelineResponse(pipelines);
-    }
-
     @Override
     public boolean equals(Object other) {
         if (other == null) {
diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java
index 850ff50cd0187..28aec1ee0ebb8 100644
--- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java
+++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResult.java
@@ -9,17 +9,12 @@
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
-
 /**
  * Holds the result of what a pipeline did to a sample document via the simulate api, but instead of {@link SimulateDocumentBaseResult}
  * this result class holds the intermediate result each processor did to the sample document.
@@ -28,16 +23,6 @@ public final class SimulateDocumentVerboseResult implements SimulateDocumentResu
     public static final String PROCESSOR_RESULT_FIELD = "processor_results";
     private final List<SimulateProcessorResult> processorResults;
 
-    @SuppressWarnings("unchecked")
-    public static final ConstructingObjectParser<SimulateDocumentVerboseResult, Void> PARSER = new ConstructingObjectParser<>(
-        "simulate_document_verbose_result",
-        true,
-        a -> new SimulateDocumentVerboseResult((List<SimulateProcessorResult>) a[0])
-    );
-    static {
-        PARSER.declareObjectArray(constructorArg(), SimulateProcessorResult.PARSER, new ParseField(PROCESSOR_RESULT_FIELD));
-    }
-
     public SimulateDocumentVerboseResult(List<SimulateProcessorResult> processorResults) {
         this.processorResults = processorResults;
     }
@@ -73,8 +58,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         builder.endObject();
         return builder;
     }
-
-    public static SimulateDocumentVerboseResult fromXContent(XContentParser parser) {
-        return PARSER.apply(parser, null);
-    }
 }
diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
index 8b1116951df82..3d00d18565756 100644
--- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java
@@ -12,33 +12,19 @@
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.xcontent.ConstructingObjectParser;
-import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
-import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
 
 import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
 import static org.elasticsearch.rest.RestStatus.OK;
-import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
 
 public class ClearScrollResponse extends ActionResponse implements ToXContentObject {
 
-    private static final ParseField SUCCEEDED = new ParseField("succeeded");
-    private static final ParseField NUMFREED = new ParseField("num_freed");
-
-    private static final ConstructingObjectParser<ClosePointInTimeResponse, Void> PARSER = new ConstructingObjectParser<>(
-        "clear_scroll",
-        true,
-        a -> new ClosePointInTimeResponse((boolean) a[0], (int) a[1])
-    );
-    static {
-        PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SUCCEEDED, ObjectParser.ValueType.BOOLEAN);
-        PARSER.declareField(constructorArg(), (parser, context) -> parser.intValue(), NUMFREED, ObjectParser.ValueType.INT);
-    }
+    public static final ParseField SUCCEEDED = new ParseField("succeeded");
+    public static final ParseField NUMFREED = new ParseField("num_freed");
 
     private final boolean succeeded;
     private final int numFreed;
@@ -82,13 +68,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder;
     }
 
-    /**
-     * Parse the clear scroll response body into a new {@link ClearScrollResponse} object
-     */
-    public static ClosePointInTimeResponse fromXContent(XContentParser parser) {
-        return PARSER.apply(parser, null);
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(succeeded);
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
index 0f4c01c674b1a..d4231c9f7538b 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
@@ -23,23 +23,118 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.AbstractXContentSerializingTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentParser;
 import org.hamcrest.Matchers;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.function.Predicate;
 import java.util.regex.Pattern;
 
+import static java.util.Collections.emptyMap;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg;
 import static org.hamcrest.CoreMatchers.allOf;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 public class ClusterHealthResponsesTests extends AbstractXContentSerializingTestCase<ClusterHealthResponse> {
+
+    private static final ConstructingObjectParser<ClusterHealthResponse, Void> PARSER = new ConstructingObjectParser<>(
+        "cluster_health_response",
+        true,
+        parsedObjects -> {
+            int i = 0;
+            // ClusterStateHealth fields
+            int numberOfNodes = (int) parsedObjects[i++];
+            int numberOfDataNodes = (int) parsedObjects[i++];
+            int activeShards = (int) parsedObjects[i++];
+            int relocatingShards = (int) parsedObjects[i++];
+            int activePrimaryShards = (int) parsedObjects[i++];
+            int initializingShards = (int) parsedObjects[i++];
+            int unassignedShards = (int) parsedObjects[i++];
+            double activeShardsPercent = (double) parsedObjects[i++];
+            String statusStr = (String) parsedObjects[i++];
+            ClusterHealthStatus status = ClusterHealthStatus.fromString(statusStr);
+            @SuppressWarnings("unchecked")
+            List<ClusterIndexHealth> indexList = (List<ClusterIndexHealth>) parsedObjects[i++];
+            final Map<String, ClusterIndexHealth> indices;
+            if (indexList == null || indexList.isEmpty()) {
+                indices = emptyMap();
+            } else {
+                indices = Maps.newMapWithExpectedSize(indexList.size());
+                for (ClusterIndexHealth indexHealth : indexList) {
+                    indices.put(indexHealth.getIndex(), indexHealth);
+                }
+            }
+            ClusterStateHealth stateHealth = new ClusterStateHealth(
+                activePrimaryShards,
+                activeShards,
+                relocatingShards,
+                initializingShards,
+                unassignedShards,
+                numberOfNodes,
+                numberOfDataNodes,
+                activeShardsPercent,
+                status,
+                indices
+            );
+
+            // ClusterHealthResponse fields
+            String clusterName = (String) parsedObjects[i++];
+            int numberOfPendingTasks = (int) parsedObjects[i++];
+            int numberOfInFlightFetch = (int) parsedObjects[i++];
+            int delayedUnassignedShards = (int) parsedObjects[i++];
+            long taskMaxWaitingTimeMillis = (long) parsedObjects[i++];
+            boolean timedOut = (boolean) parsedObjects[i];
+            return new ClusterHealthResponse(
+                clusterName,
+                numberOfPendingTasks,
+                numberOfInFlightFetch,
+                delayedUnassignedShards,
+                TimeValue.timeValueMillis(taskMaxWaitingTimeMillis),
+                timedOut,
+                stateHealth
+            );
+        }
+    );
+
+    private static final ObjectParser.NamedObjectParser<ClusterIndexHealth, Void> INDEX_PARSER = (
+        XContentParser parser,
+        Void context,
+        String index) -> ClusterIndexHealth.innerFromXContent(parser, index);
+
+    static {
+        // ClusterStateHealth fields
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.NUMBER_OF_NODES));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.NUMBER_OF_DATA_NODES));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.ACTIVE_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.RELOCATING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.ACTIVE_PRIMARY_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.INITIALIZING_SHARDS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.UNASSIGNED_SHARDS));
+        PARSER.declareDouble(constructorArg(), new ParseField(ClusterHealthResponse.ACTIVE_SHARDS_PERCENT_AS_NUMBER));
+        PARSER.declareString(constructorArg(), new ParseField(ClusterHealthResponse.STATUS));
+        // Can be absent if LEVEL == 'cluster'
+        PARSER.declareNamedObjects(optionalConstructorArg(), INDEX_PARSER, new ParseField(ClusterHealthResponse.INDICES));
+
+        // ClusterHealthResponse fields
+        PARSER.declareString(constructorArg(), new ParseField(ClusterHealthResponse.CLUSTER_NAME));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.NUMBER_OF_PENDING_TASKS));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.NUMBER_OF_IN_FLIGHT_FETCH));
+        PARSER.declareInt(constructorArg(), new ParseField(ClusterHealthResponse.DELAYED_UNASSIGNED_SHARDS));
+        PARSER.declareLong(constructorArg(), new ParseField(ClusterHealthResponse.TASK_MAX_WAIT_TIME_IN_QUEUE_IN_MILLIS));
+        PARSER.declareBoolean(constructorArg(), new ParseField(ClusterHealthResponse.TIMED_OUT));
+    }
+
     private final ClusterStatsLevel level = randomFrom(ClusterStatsLevel.values());
 
     public void testIsTimeout() {
@@ -102,7 +197,7 @@ ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws
 
     @Override
     protected ClusterHealthResponse doParseInstance(XContentParser parser) {
-        return ClusterHealthResponse.fromXContent(parser);
+        return PARSER.apply(parser, null);
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java
index 6c9297bb41ae0..86968bda62d91 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponseTests.java
@@ -11,6 +11,7 @@
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentParserUtils;
 import org.elasticsearch.index.RandomCreateIndexGenerator;
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
 import org.elasticsearch.xcontent.XContentParser;
@@ -18,6 +19,7 @@
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.function.Predicate;
 
@@ -70,7 +72,58 @@ protected Writeable.Reader<GetSettingsResponse> instanceReader() {
 
     @Override
     protected GetSettingsResponse doParseInstance(XContentParser parser) throws IOException {
-        return GetSettingsResponse.fromXContent(parser);
+        HashMap<String, Settings> indexToSettings = new HashMap<>();
+        HashMap<String, Settings> indexToDefaultSettings = new HashMap<>();
+
+        if (parser.currentToken() == null) {
+            parser.nextToken();
+        }
+        XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
+        parser.nextToken();
+
+        while (parser.isClosed() == false) {
+            if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+                // we must assume this is an index entry
+                parseIndexEntry(parser, indexToSettings, indexToDefaultSettings);
+            } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
+                parser.skipChildren();
+            } else {
+                parser.nextToken();
+            }
+        }
+
+        return new GetSettingsResponse(Map.copyOf(indexToSettings), Map.copyOf(indexToDefaultSettings));
+    }
+
+    private static void parseIndexEntry(
+        XContentParser parser,
+        Map<String, Settings> indexToSettings,
+        Map<String, Settings> indexToDefaultSettings
+    ) throws IOException {
+        String indexName = parser.currentName();
+        parser.nextToken();
+        while (parser.isClosed() == false && parser.currentToken() != XContentParser.Token.END_OBJECT) {
+            parseSettingsField(parser, indexName, indexToSettings, indexToDefaultSettings);
+        }
+    }
+
+    private static void parseSettingsField(
+        XContentParser parser,
+        String currentIndexName,
+        Map<String, Settings> indexToSettings,
+        Map<String, Settings> indexToDefaultSettings
+    ) throws IOException {
+
+        if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
+            switch (parser.currentName()) {
+                case "settings" -> indexToSettings.put(currentIndexName, Settings.fromXContent(parser));
+                case "defaults" -> indexToDefaultSettings.put(currentIndexName, Settings.fromXContent(parser));
+                default -> parser.skipChildren();
+            }
+        } else if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
+            parser.skipChildren();
+        }
+        parser.nextToken();
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
index ccf9681d3680b..76b1fa0011540 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkItemResponseTests.java
@@ -13,15 +13,18 @@
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
+import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.delete.DeleteResponseTests;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.index.IndexResponseTests;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.action.update.UpdateResponseTests;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.core.CheckedConsumer;
 import org.elasticsearch.core.RestApiVersion;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -34,6 +37,8 @@
 
 import static org.elasticsearch.ElasticsearchExceptionTests.assertDeepEquals;
 import static org.elasticsearch.ElasticsearchExceptionTests.randomExceptions;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
 import static org.hamcrest.Matchers.containsString;
 
 public class BulkItemResponseTests extends ESTestCase {
@@ -93,7 +98,7 @@ public void testToAndFromXContent() throws IOException {
             BulkItemResponse parsedBulkItemResponse;
             try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
                 assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
-                parsedBulkItemResponse = BulkItemResponse.fromXContent(parser, bulkItemId);
+                parsedBulkItemResponse = itemResponseFromXContent(parser, bulkItemId);
                 assertNull(parser.nextToken());
             }
             assertBulkItemResponse(expectedBulkItemResponse, parsedBulkItemResponse);
@@ -127,7 +132,7 @@ public void testFailureToAndFromXContent() throws IOException {
             BulkItemResponse parsedBulkItemResponse;
             try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
                 assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
-                parsedBulkItemResponse = BulkItemResponse.fromXContent(parser, itemId);
+                parsedBulkItemResponse = itemResponseFromXContent(parser, itemId);
                 assertNull(parser.nextToken());
             }
             assertBulkItemResponse(expectedBulkItemResponse, parsedBulkItemResponse);
@@ -161,4 +166,78 @@ public static void assertBulkItemResponse(BulkItemResponse expected, BulkItemRes
             }
         }
     }
+
+    /**
+     * Reads a {@link BulkItemResponse} from a {@link XContentParser}.
+     *
+     * @param parser the {@link XContentParser}
+     * @param id the id to assign to the parsed {@link BulkItemResponse}. It is usually the index of
+     *           the item in the {@link BulkResponse#getItems} array.
+     */
+    public static BulkItemResponse itemResponseFromXContent(XContentParser parser, int id) throws IOException {
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
+
+        XContentParser.Token token = parser.nextToken();
+        ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
+
+        String currentFieldName = parser.currentName();
+        token = parser.nextToken();
+
+        final DocWriteRequest.OpType opType = DocWriteRequest.OpType.fromString(currentFieldName);
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
+
+        DocWriteResponse.Builder builder = null;
+        CheckedConsumer<XContentParser, IOException> itemParser = null;
+
+        if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
+            final IndexResponse.Builder indexResponseBuilder = new IndexResponse.Builder();
+            builder = indexResponseBuilder;
+            itemParser = (indexParser) -> IndexResponse.parseXContentFields(indexParser, indexResponseBuilder);
+
+        } else if (opType == DocWriteRequest.OpType.UPDATE) {
+            final UpdateResponse.Builder updateResponseBuilder = new UpdateResponse.Builder();
+            builder = updateResponseBuilder;
+            itemParser = (updateParser) -> UpdateResponse.parseXContentFields(updateParser, updateResponseBuilder);
+
+        } else if (opType == DocWriteRequest.OpType.DELETE) {
+            final DeleteResponse.Builder deleteResponseBuilder = new DeleteResponse.Builder();
+            builder = deleteResponseBuilder;
+            itemParser = (deleteParser) -> DeleteResponse.parseXContentFields(deleteParser, deleteResponseBuilder);
+        } else {
+            throwUnknownField(currentFieldName, parser);
+        }
+
+        RestStatus status = null;
+        ElasticsearchException exception = null;
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            }
+
+            if (BulkItemResponse.ERROR.equals(currentFieldName)) {
+                if (token == XContentParser.Token.START_OBJECT) {
+                    exception = ElasticsearchException.fromXContent(parser);
+                }
+            } else if (BulkItemResponse.STATUS.equals(currentFieldName)) {
+                if (token == XContentParser.Token.VALUE_NUMBER) {
+                    status = RestStatus.fromCode(parser.intValue());
+                }
+            } else {
+                itemParser.accept(parser);
+            }
+        }
+
+        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser);
+        token = parser.nextToken();
+        ensureExpectedToken(XContentParser.Token.END_OBJECT, token, parser);
+
+        BulkItemResponse bulkItemResponse;
+        if (exception != null) {
+            Failure failure = new Failure(builder.getShardId().getIndexName(), builder.getId(), exception, status);
+            bulkItemResponse = BulkItemResponse.failure(id, opType, failure);
+        } else {
+            bulkItemResponse = BulkItemResponse.success(id, opType, builder.build());
+        }
+        return bulkItemResponse;
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java
index c1cd88e0864a4..366196b6a0eac 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkResponseTests.java
@@ -24,11 +24,16 @@
 import org.elasticsearch.xcontent.XContentType;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.elasticsearch.ElasticsearchExceptionTests.randomExceptions;
 import static org.elasticsearch.action.bulk.BulkItemResponseTests.assertBulkItemResponse;
 import static org.elasticsearch.action.bulk.BulkResponse.NO_INGEST_TOOK;
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownField;
+import static org.elasticsearch.common.xcontent.XContentParserUtils.throwUnknownToken;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
 import static org.hamcrest.Matchers.equalTo;
 
@@ -78,7 +83,7 @@ public void testToAndFromXContent() throws IOException {
 
         BulkResponse parsedBulkResponse;
         try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
-            parsedBulkResponse = BulkResponse.fromXContent(parser);
+            parsedBulkResponse = fromXContent(parser);
             assertNull(parser.nextToken());
         }
 
@@ -154,4 +159,39 @@ public void testToXContentPlacesErrorsFirst() throws IOException {
         }
         return randomDocWriteResponses;
     }
+
+    private static BulkResponse fromXContent(XContentParser parser) throws IOException {
+        XContentParser.Token token = parser.nextToken();
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser);
+
+        long took = -1L;
+        long ingestTook = NO_INGEST_TOOK;
+        List<BulkItemResponse> items = new ArrayList<>();
+
+        String currentFieldName = parser.currentName();
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token.isValue()) {
+                if (BulkResponse.TOOK.equals(currentFieldName)) {
+                    took = parser.longValue();
+                } else if (BulkResponse.INGEST_TOOK.equals(currentFieldName)) {
+                    ingestTook = parser.longValue();
+                } else if (BulkResponse.ERRORS.equals(currentFieldName) == false) {
+                    throwUnknownField(currentFieldName, parser);
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                if (BulkResponse.ITEMS.equals(currentFieldName)) {
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        items.add(BulkItemResponseTests.itemResponseFromXContent(parser, items.size()));
+                    }
+                } else {
+                    throwUnknownField(currentFieldName, parser);
+                }
+            } else {
+                throwUnknownToken(token, parser);
+            }
+        }
+        return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java
index ab72bf17beca9..49697101b3234 100644
--- a/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/get/GetResponseTests.java
@@ -23,6 +23,7 @@
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.Locale;
 import java.util.function.Predicate;
 
 import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
@@ -69,7 +70,7 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws
         }
         GetResponse parsedGetResponse;
         try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
-            parsedGetResponse = GetResponse.fromXContent(parser);
+            parsedGetResponse = parseInstance(parser);
             assertNull(parser.nextToken());
         }
         assertEquals(expectedGetResponse.getSourceAsMap(), parsedGetResponse.getSourceAsMap());
@@ -172,7 +173,7 @@ public void testFromXContentThrowsParsingException() throws IOException {
         BytesReference originalBytes = toShuffledXContent(getResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
 
         try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
-            ParsingException exception = expectThrows(ParsingException.class, () -> GetResponse.fromXContent(parser));
+            ParsingException exception = expectThrows(ParsingException.class, () -> parseInstance(parser));
             assertEquals("Missing required fields [_index,_id]", exception.getMessage());
         }
     }
@@ -184,4 +185,19 @@ private static GetResponse copyGetResponse(GetResponse getResponse) {
     private static GetResponse mutateGetResponse(GetResponse getResponse) {
         return new GetResponse(mutateGetResult(getResponse.getResult));
     }
+
+    private static GetResponse parseInstance(XContentParser parser) throws IOException {
+        GetResult getResult = GetResult.fromXContent(parser);
+
+        // At this stage we ensure that we parsed enough information to return
+        // a valid GetResponse instance. If it's not the case, we throw an
+        // exception so that callers know it and can handle it correctly.
+        if (getResult.getIndex() == null && getResult.getId() == null) {
+            throw new ParsingException(
+                parser.getTokenLocation(),
+                String.format(Locale.ROOT, "Missing required fields [%s,%s]", GetResult._INDEX, GetResult._ID)
+            );
+        }
+        return new GetResponse(getResult);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java
index 6f5841f3d2a03..c1ed3a670dffd 100644
--- a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java
@@ -26,6 +26,8 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+
 public class GetPipelineResponseTests extends AbstractXContentSerializingTestCase<GetPipelineResponse> {
 
     private XContentBuilder getRandomXContentBuilder() throws IOException {
@@ -69,7 +71,7 @@ public void testXContentDeserialization() throws IOException {
                 .xContent()
                 .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput())
         ) {
-            parsedResponse = GetPipelineResponse.fromXContent(parser);
+            parsedResponse = doParseInstance(parser);
         }
         List<PipelineConfiguration> actualPipelines = response.pipelines();
         List<PipelineConfiguration> parsedPipelines = parsedResponse.pipelines();
@@ -82,7 +84,23 @@
 
     @Override
     protected GetPipelineResponse doParseInstance(XContentParser parser) throws IOException {
-        return GetPipelineResponse.fromXContent(parser);
+        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
+        List<PipelineConfiguration> pipelines = new ArrayList<>();
+        while (parser.nextToken().equals(XContentParser.Token.FIELD_NAME)) {
+            String pipelineId = parser.currentName();
+            parser.nextToken();
+            try (XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent())) {
+                contentBuilder.generator().copyCurrentStructure(parser);
+                PipelineConfiguration pipeline = new PipelineConfiguration(
+                    pipelineId,
+                    BytesReference.bytes(contentBuilder),
+                    contentBuilder.contentType()
+                );
+                pipelines.add(pipeline);
+            }
+        }
+        ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.currentToken(), parser);
+        return new GetPipelineResponse(pipelines);
     }
 
     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java
index ebfeb310a916b..921637d06b982 100644
--- a/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java
+++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulateDocumentVerboseResultTests.java
@@ -8,6 +8,8 @@
 package org.elasticsearch.action.ingest;
 
 import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ParseField;
 import org.elasticsearch.xcontent.XContentParser;
 
 import java.io.IOException;
@@ -17,8 +19,24 @@
 import java.util.function.Predicate;
 import java.util.function.Supplier;
 
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;
+
 public class SimulateDocumentVerboseResultTests extends AbstractXContentTestCase<SimulateDocumentVerboseResult> {
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<SimulateDocumentVerboseResult, Void> PARSER = new ConstructingObjectParser<>(
+        "simulate_document_verbose_result",
+        true,
+        a -> new SimulateDocumentVerboseResult((List) a[0])
+    );
+    static {
+        PARSER.declareObjectArray(
+            constructorArg(),
+            SimulateProcessorResult.PARSER,
+            new ParseField(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)
+        );
+    }
+
     static SimulateDocumentVerboseResult createTestInstance(boolean withFailures) {
         int numDocs = randomIntBetween(0, 5);
         List results = new ArrayList<>();
@@ -42,7 +60,7 @@ protected SimulateDocumentVerboseResult createTestInstance() {

     @Override
     protected SimulateDocumentVerboseResult doParseInstance(XContentParser parser) {
-        return SimulateDocumentVerboseResult.fromXContent(parser);
+        return PARSER.apply(parser, null);
     }

     @Override
diff --git a/server/src/test/java/org/elasticsearch/search/ClearScrollResponseTests.java b/server/src/test/java/org/elasticsearch/search/ClearScrollResponseTests.java
index 8b2759ff5a7a0..51f741e4f03fc 100644
--- a/server/src/test/java/org/elasticsearch/search/ClearScrollResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/search/ClearScrollResponseTests.java
@@ -9,9 +9,12 @@
 package org.elasticsearch.search;

 import org.elasticsearch.action.search.ClearScrollResponse;
+import org.elasticsearch.action.search.ClosePointInTimeResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
+import org.elasticsearch.xcontent.ObjectParser;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -21,9 +24,30 @@
 import java.io.IOException;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
+import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg;

 public class ClearScrollResponseTests extends ESTestCase {

+    private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(
+        "clear_scroll",
+        true,
+        a -> new ClosePointInTimeResponse((boolean) a[0], (int) a[1])
+    );
+    static {
+        PARSER.declareField(
+            constructorArg(),
+            (parser, context) -> parser.booleanValue(),
+            ClearScrollResponse.SUCCEEDED,
+            ObjectParser.ValueType.BOOLEAN
+        );
+        PARSER.declareField(
+            constructorArg(),
+            (parser, context) -> parser.intValue(),
+            ClearScrollResponse.NUMFREED,
+            ObjectParser.ValueType.INT
+        );
+    }
+
     public void testToXContent() throws IOException {
         ClearScrollResponse clearScrollResponse = new ClearScrollResponse(true, 10);
         try (XContentBuilder builder = JsonXContent.contentBuilder()) {
@@ -39,7 +63,7 @@ public void testToAndFromXContent() throws IOException {
         BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
         ClearScrollResponse parsedResponse;
         try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
-            parsedResponse = ClearScrollResponse.fromXContent(parser);
+            parsedResponse = PARSER.parse(parser, null);
         }
         assertEquals(originalResponse.isSucceeded(), parsedResponse.isSucceeded());
         assertEquals(originalResponse.getNumFreed(), parsedResponse.getNumFreed());

From 6106da5d40e0049bf8ed8bd9ccbddecdbcdcf0dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?=
Date: Fri, 12 Jan 2024 17:11:04 +0100
Subject: [PATCH 03/95] [LTR] FieldValueExtractor - Checking if the list of fetched values is empty. (#104314)

* Checking if the list of fetched values is empty.
* Update docs/changelog/104314.yaml
---
 docs/changelog/104314.yaml | 5 +++++
 .../xpack/ml/inference/ltr/FieldValueFeatureExtractor.java | 5 ++++-
 2 files changed, 9 insertions(+), 1 deletion(-)
 create mode 100644 docs/changelog/104314.yaml

diff --git a/docs/changelog/104314.yaml b/docs/changelog/104314.yaml
new file mode 100644
index 0000000000000..a17e810a2c023
--- /dev/null
+++ b/docs/changelog/104314.yaml
@@ -0,0 +1,5 @@
+pr: 104314
+summary: "[LTR] `FieldValueExtractor` - Checking if the list of fetched values is empty"
+area: Machine Learning
+type: bug
+issues: []
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java
index 5a2e3d29df949..9014c79f0af98 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/FieldValueFeatureExtractor.java
@@ -53,7 +53,10 @@ public void setNextReader(LeafReaderContext segmentContext) {
     public void addFeatures(Map featureMap, int docId) throws IOException {
         Source source = sourceLookup.getSource(this.segmentContext, docId);
         for (FieldValueFetcher vf : this.valueFetcherList) {
-            featureMap.put(vf.fieldName(), vf.valueFetcher().fetchValues(source, docId, new ArrayList<>()).get(0));
+            List values = vf.valueFetcher().fetchValues(source, docId, new ArrayList<>());
+            if (values.isEmpty() == false) {
+                featureMap.put(vf.fieldName(), values.get(0));
+            }
         }
     }

From 08c9332350f841990d9f980021a20a6b19d45ee1 Mon Sep 17 00:00:00 2001
From: Bogdan Pintea
Date: Fri, 12 Jan 2024 17:23:40 +0100
Subject: [PATCH 04/95] ESQL: Update moved `heap-attack` QA suite's build group (#104312)

This updates ESQL's `heap-attack` QA suite's build group.

---
 test/external-modules/esql-heap-attack/build.gradle | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/external-modules/esql-heap-attack/build.gradle b/test/external-modules/esql-heap-attack/build.gradle
index 9f1cdfac61aa1..3a95f3f0b59c8 100644
--- a/test/external-modules/esql-heap-attack/build.gradle
+++ b/test/external-modules/esql-heap-attack/build.gradle
@@ -11,6 +11,7 @@ apply plugin: 'elasticsearch.internal-java-rest-test'

 // Necessary to use tests in Serverless
 apply plugin: 'elasticsearch.internal-test-artifact'
+group = 'org.elasticsearch.plugin'

 esplugin {
   description 'A test module that can trigger out of memory'

From 5d6b833fe6c98321534fca70fa8c44ee09f530fb Mon Sep 17 00:00:00 2001
From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com>
Date: Fri, 12 Jan 2024 16:50:00 +0000
Subject: [PATCH 05/95] ES|QL Minor async query doc parameter clarification (#104327)

This commit adds a minor clarification to an ESQL async query doc
parameter.

---
 docs/reference/esql/esql-async-query-api.asciidoc | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc
index 0a78a923523cc..0d15eb313a61f 100644
--- a/docs/reference/esql/esql-async-query-api.asciidoc
+++ b/docs/reference/esql/esql-async-query-api.asciidoc
@@ -93,8 +93,9 @@ parameters:
 Timeout duration to wait for the request to finish. Defaults to a 1 second,
 meaning the request waits for 1 second for the query results.

-If this parameter is specified and the request completes during this period,
-complete results are returned.
+If the query completes during this period then results will be
+returned. Otherwise, a query `id` is returned that can later be used to
+retrieve the results.
 If the request does not complete during this period, a query <> is returned.

From 7311ab1785cd8e311cc40899f1404c87568cd566 Mon Sep 17 00:00:00 2001
From: Jake Landis
Date: Fri, 12 Jan 2024 11:20:01 -0600
Subject: [PATCH 06/95] Prefer new test cluster framework for new FIPS setting (#104287)

https://github.com/elastic/elasticsearch/pull/103483 introduced a new setting
for FIPS only. Due to the way FIPS is configured with the older Gradle test
cluster framework, this setting was getting applied to older clusters in BWC
tests that did not have the setting, causing test failures.

The new test framework has better semantics for version-specific
configuration. This commit applies the new setting via the new framework
with a version-specific condition.

Adding this setting to the test clusters is a simple way to test the setting
(which will cause errors if the required providers are not found in the
cluster). The pseudo test does not care which framework is used for
configuration.

Also, using the new framework allows removing some hacky configuration
previously needed to handle some older test cluster configuration that used
older versions.

Fixes: https://github.com/elastic/elasticsearch/issues/104234
---
 .../src/main/groovy/elasticsearch.fips.gradle | 1 -
 .../local/FipsEnabledClusterConfigProvider.java | 1 +
 .../multi-cluster-tests-with-security/build.gradle | 13 -------------
 .../multi-cluster-tests-with-security/build.gradle | 12 ------------
 4 files changed, 1 insertion(+), 26 deletions(-)

diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle
index aaae18401685a..f691d4bd996a7 100644
--- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle
+++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle
@@ -79,7 +79,6 @@ if (BuildParams.inFipsJvm) {
       // with no x-pack.
Tests having security explicitly enabled/disabled will override this setting setting 'xpack.security.enabled', 'false' setting 'xpack.security.fips_mode.enabled', 'true' - setting 'xpack.security.fips_mode.required_providers', '["BCFIPS", "BCJSSE"]' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.password_hashing.algorithm', 'pbkdf2_stretch' keystorePassword 'keystore-password' diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/FipsEnabledClusterConfigProvider.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/FipsEnabledClusterConfigProvider.java index 473456f6b0cc3..3341b20a89d3c 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/FipsEnabledClusterConfigProvider.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/FipsEnabledClusterConfigProvider.java @@ -33,6 +33,7 @@ public void apply(LocalClusterSpecBuilder builder) { .setting("xpack.security.fips_mode.enabled", "true") .setting("xpack.license.self_generated.type", "trial") .setting("xpack.security.authc.password_hashing.algorithm", "pbkdf2_stretch") + .setting("xpack.security.fips_mode.required_providers", () -> "[BCFIPS, BCJSSE]", n -> n.getVersion().onOrAfter("8.13.0")) .keystorePassword("keystore-password"); } } diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index 9d931974d25d5..d102490820a07 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -50,29 +50,16 @@ testClusters.register('mixed-cluster') { tasks.register('remote-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'remote_cluster' - maybeDisableForFips(it) } tasks.register('mixed-cluster', RestIntegTestTask) { dependsOn 'remote-cluster' useCluster remoteCluster systemProperty 'tests.rest.suite', 'multi_cluster' - maybeDisableForFips(it) } tasks.register("integTest") { dependsOn 'mixed-cluster' - maybeDisableForFips(it) } tasks.named("check").configure { dependsOn("integTest") } - -//TODO: remove with version 8.14. A new FIPS setting was added in 8.13. Since FIPS configures all test clusters and this specific integTest uses -// the previous minor version, that setting is not available when running in FIPS until 8.14. 
-def maybeDisableForFips(task) {
-  if (BuildParams.inFipsJvm) {
-    if(Version.fromString(project.version).before(Version.fromString('8.14.0'))) {
-      task.enabled = false
-    }
-  }
-}

From aa1a5138febbb467449b98393f1394a1e9424fd3 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2024 09:39:31 -0800
Subject: [PATCH 07/95] Fail fast on heap attack tests (#104328)

We can't use assume after a test fails.

---
 .../xpack/esql/heap_attack/HeapAttackIT.java | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
index 578c29d210797..102b65df1bfde 100644
--- a/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
+++ b/test/external-modules/esql-heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/heap_attack/HeapAttackIT.java
@@ -75,6 +75,11 @@ protected String getTestRestCluster() {
         return cluster.getHttpAddresses();
     }

+    @Before
+    public void skipOnAborted() {
+        assumeFalse("skip on aborted", SUITE_ABORTED);
+    }
+
     /**
      * This used to fail, but we've since compacted top n so it actually succeeds now.
      */
@@ -552,7 +557,9 @@ private static void assertWriteResponse(Response response) throws IOException {
     @Before
     @After
     public void assertRequestBreakerEmpty() throws Exception {
-        assumeFalse("suite was aborted", SUITE_ABORTED);
+        if (SUITE_ABORTED) {
+            return;
+        }
         assertBusy(() -> {
             HttpEntity entity = adminClient().performRequest(new Request("GET", "/_nodes/stats")).getEntity();
             Map stats = XContentHelper.convertToMap(XContentType.JSON.xContent(), entity.getContent(), false);

From 4f261fc7628e8cad2bbd97ceb7e53854872d8be9 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Fri, 12 Jan 2024 09:40:34 -0800
Subject: [PATCH 08/95] Find break limits for DocBlock tests on the fly (#104213)

The test failure is related to #104159, where we had an overestimate of the
RAM usage for X-ArrayVector.
Instead of updating the break limits, this PR uses the breaker utility that @nik9000 wrote to dynamically compute the limits on-the-fly. Closes #104191 --- .../compute/data/DocVectorTests.java | 112 +++++++++++------- 1 file changed, 69 insertions(+), 43 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java index df1662e1dfb6d..2f9cf6ec57775 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java @@ -12,11 +12,13 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.compute.operator.ComputeTestCase; import org.elasticsearch.core.Releasables; +import org.elasticsearch.test.BreakerTestUtil; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.function.Function; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -62,34 +64,31 @@ public void testNonDecreasingDescendingDocs() { docs.close(); } - private static int MAX_BUILD_BREAKS_LIMIT = 1391; - public void testBuildBreaks() { - testBuildBreaks(ByteSizeValue.ofBytes(between(0, MAX_BUILD_BREAKS_LIMIT))); - } - - public void testBuildBreaksMax() { - testBuildBreaks(ByteSizeValue.ofBytes(MAX_BUILD_BREAKS_LIMIT)); - } - - private void testBuildBreaks(ByteSizeValue limit) { - int size = 100; - BlockFactory blockFactory = blockFactory(limit); - Exception e = expectThrows(CircuitBreakingException.class, () -> { - try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) { - for (int r = 0; r < size; r++) { - builder.appendShard(3 - size % 4); - builder.appendSegment(size % 10); - builder.appendDoc(size); - } - builder.build().close(); - } + var maxBreakLimit = BreakerTestUtil.findBreakerLimit(ByteSizeValue.ofMb(128), limit -> { + BlockFactory blockFactory = blockFactory(limit); + buildDocBlock(blockFactory).close(); }); + var limit = ByteSizeValue.ofBytes(randomLongBetween(0, maxBreakLimit.getBytes())); + BlockFactory blockFactory = blockFactory(limit); + Exception e = expectThrows(CircuitBreakingException.class, () -> buildDocBlock(blockFactory).close()); assertThat(e.getMessage(), equalTo("over test limit")); logger.info("break position", e); assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } + private DocBlock buildDocBlock(BlockFactory blockFactory) { + int size = 100; + try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) { + for (int r = 0; r < size; r++) { + builder.appendShard(3 - r % 4); + builder.appendSegment(r % 10); + builder.appendDoc(size); + } + return builder.build(); + } + } + public void testShardSegmentDocMap() { assertShardSegmentDocMap( new int[][] { @@ -171,25 +170,31 @@ private void assertShardSegmentDocMap(int[][] data, int[][] expected) { assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } - // TODO these are really difficult to maintain. can we figure these out of the fly? 
- private static final int MAX_SHARD_SEGMENT_DOC_MAP_BREAKS = 2220; - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104191") public void testShardSegmentDocMapBreaks() { - testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(between(MAX_BUILD_BREAKS_LIMIT + 1, MAX_SHARD_SEGMENT_DOC_MAP_BREAKS))); - } - - public void testShardSegmentDocMapBreaksMax() { - testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(MAX_SHARD_SEGMENT_DOC_MAP_BREAKS)); + ByteSizeValue buildBreakLimit = BreakerTestUtil.findBreakerLimit(ByteSizeValue.ofMb(128), limit -> { + BlockFactory blockFactory = blockFactory(limit); + buildDocBlock(blockFactory).close(); + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + }); + ByteSizeValue docMapBreakLimit = BreakerTestUtil.findBreakerLimit(ByteSizeValue.ofMb(128), limit -> { + BlockFactory blockFactory = blockFactory(limit); + try (DocBlock docBlock = buildDocBlock(blockFactory)) { + docBlock.asVector().shardSegmentDocMapForwards(); + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + }); + var limit = ByteSizeValue.ofBytes(randomLongBetween(buildBreakLimit.getBytes() + 1, docMapBreakLimit.getBytes())); + BlockFactory blockFactory = blockFactory(limit); + testShardSegmentDocMapBreaks(blockFactory); + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } - private void testShardSegmentDocMapBreaks(ByteSizeValue limit) { + private void testShardSegmentDocMapBreaks(BlockFactory blockFactory) { int size = 100; - BlockFactory blockFactory = blockFactory(limit); try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) { for (int r = 0; r < size; r++) { - builder.appendShard(3 - size % 4); - builder.appendSegment(size % 10); + builder.appendShard(3 - r % 4); + builder.appendSegment(r % 10); builder.appendDoc(size); } try (DocBlock docBlock = builder.build()) { @@ -255,15 +260,36 @@ public void testFilter() { } public void testFilterBreaks() { - BlockFactory factory = blockFactory(ByteSizeValue.ofBytes(between(250, 370))); - try ( - DocVector docs = new DocVector( - factory.newConstantIntVector(0, 10), - factory.newConstantIntVector(0, 10), - factory.newIntArrayVector(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 10), - false - ) - ) { + Function buildDocVector = factory -> { + IntVector shards = null; + IntVector segments = null; + IntVector docs = null; + DocVector result = null; + try { + shards = factory.newConstantIntVector(0, 10); + segments = factory.newConstantIntVector(0, 10); + docs = factory.newConstantIntVector(0, 10); + result = new DocVector(shards, segments, docs, false); + return result; + } finally { + if (result == null) { + Releasables.close(shards, segments, docs); + } + } + }; + ByteSizeValue buildBreakLimit = BreakerTestUtil.findBreakerLimit(ByteSizeValue.ofMb(128), limit -> { + BlockFactory factory = blockFactory(limit); + buildDocVector.apply(factory).close(); + }); + ByteSizeValue filterBreakLimit = BreakerTestUtil.findBreakerLimit(ByteSizeValue.ofMb(128), limit -> { + BlockFactory factory = blockFactory(limit); + try (DocVector docs = buildDocVector.apply(factory)) { + docs.filter(1, 2, 3).close(); + } + }); + ByteSizeValue limit = ByteSizeValue.ofBytes(randomLongBetween(buildBreakLimit.getBytes() + 1, filterBreakLimit.getBytes())); + BlockFactory factory = blockFactory(limit); + try (DocVector docs = buildDocVector.apply(factory)) { Exception e = expectThrows(CircuitBreakingException.class, () -> docs.filter(1, 2, 3)); assertThat(e.getMessage(), equalTo("over test limit")); } From 
d5ae347474b914c380afb07788c8689abfb0e85a Mon Sep 17 00:00:00 2001
From: Keith Massey
Date: Fri, 12 Jan 2024 11:40:55 -0600
Subject: [PATCH 09/95] Re-enabling MultiNodesStatsTests.testMultipleNodes() (#104329)

---
 .../org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java
index 44859b73ffb2e..c8aae302e357b 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MultiNodesStatsTests.java
@@ -41,7 +41,6 @@ public void cleanup() throws Exception {
         wipeMonitoringIndices();
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/96374")
     public void testMultipleNodes() throws Exception {
         int nodes = 0;

From 50ac28012d39d3f444e653f93cf4d05fbcff3533 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 12 Jan 2024 12:48:29 -0500
Subject: [PATCH 10/95] ESQL: Run async tests more carefully (#104330)

The ESQL async tests run the ESQL yaml tests two extra times - once under
the async endpoint with the `wait_for_completion_timeout` set to a long time
and *again* with `wait_for_completion_timeout` set to a short time, expecting
to receive an `id` for the query.

That second way is tricky! Even with a `0ms` timeout, sometimes the request
will complete. That's great, but the tests didn't realize that was possible.
And it's tricky to get the warnings and `catch` sections working properly
with that.

This reworks how we run these commands, breaking apart the way we run a
single API and running it as two, taking into account that the "start the
query" request could also complete the query.
Closes #104294 --- .../rest/yaml/section/ApiCallSection.java | 8 +- .../test/rest/yaml/section/DoSection.java | 76 ++++++------ .../qa/single_node/EsqlClientYamlAsyncIT.java | 16 +-- .../EsqlClientYamlAsyncSubmitAndFetchIT.java | 112 +++++++++++++----- 4 files changed, 133 insertions(+), 79 deletions(-) diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java index 1708c5977486d..58c1e3b82e336 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ApiCallSection.java @@ -32,6 +32,10 @@ public ApiCallSection(String api) { this.api = api; } + public String getApi() { + return api; + } + public ApiCallSection copyWithNewApi(String api) { ApiCallSection copy = new ApiCallSection(api); for (var e : params.entrySet()) { @@ -45,10 +49,6 @@ public ApiCallSection copyWithNewApi(String api) { return copy; } - public String getApi() { - return api; - } - public Map getParams() { // make sure we never modify the parameters once returned return unmodifiableMap(params); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index a23a433f812c2..00b92eac40d7f 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -343,7 +343,6 @@ public XContentLocation getLocation() { @Override public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { - if ("param".equals(catchParam)) { // client should throw validation error before sending request // lets just return without doing anything as we don't have any client to test here @@ -359,17 +358,7 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx apiCallSection.getHeaders(), apiCallSection.getNodeSelector() ); - if (Strings.hasLength(catchParam)) { - String catchStatusCode; - if (CATCHES.containsKey(catchParam)) { - catchStatusCode = CATCHES.get(catchParam).v1(); - } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) { - catchStatusCode = "4xx|5xx"; - } else { - throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); - } - fail(formatStatusCodeMessage(response, catchStatusCode)); - } + failIfHasCatch(response); final String testPath = executionContext.getClientYamlTestCandidate() != null ? 
executionContext.getClientYamlTestCandidate().getTestPath() : null; @@ -393,27 +382,23 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx } checkWarningHeaders(response.getWarningHeaders(), testPath); } catch (ClientYamlTestResponseException e) { - ClientYamlTestResponse restTestResponse = e.getRestTestResponse(); - if (Strings.hasLength(catchParam) == false) { - fail(formatStatusCodeMessage(restTestResponse, "2xx")); - } else if (CATCHES.containsKey(catchParam)) { - assertStatusCode(restTestResponse); - } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) { - // the text of the error message matches regular expression - assertThat( - formatStatusCodeMessage(restTestResponse, "4xx|5xx"), - e.getResponseException().getResponse().getStatusLine().getStatusCode(), - greaterThanOrEqualTo(400) - ); - Object error = executionContext.response("error"); - assertThat("error was expected in the response", error, notNullValue()); - // remove delimiters from regex - String regex = catchParam.substring(1, catchParam.length() - 1); - assertThat("the error message was expected to match the provided regex but didn't", error.toString(), matches(regex)); - } else { - throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); - } + checkResponseException(e, executionContext); + } + } + + public void failIfHasCatch(ClientYamlTestResponse response) { + if (Strings.hasLength(catchParam) == false) { + return; + } + String catchStatusCode; + if (CATCHES.containsKey(catchParam)) { + catchStatusCode = CATCHES.get(catchParam).v1(); + } else if (catchParam.startsWith("/") && catchParam.endsWith("/")) { + catchStatusCode = "4xx|5xx"; + } else { + throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); } + fail(formatStatusCodeMessage(response, catchStatusCode)); } void checkElasticProductHeader(final List productHeaders) { @@ -448,7 +433,7 @@ void checkWarningHeaders(final List warningHeaders) { /** * Check that the response contains only the warning headers that we expect. 
*/ - void checkWarningHeaders(final List warningHeaders, String testPath) { + public void checkWarningHeaders(final List warningHeaders, String testPath) { final List unexpected = new ArrayList<>(); final List unmatched = new ArrayList<>(); final List missing = new ArrayList<>(); @@ -536,6 +521,31 @@ void checkWarningHeaders(final List warningHeaders, String testPath) { } } + public void checkResponseException(ClientYamlTestResponseException e, ClientYamlTestExecutionContext executionContext) + throws IOException { + + ClientYamlTestResponse restTestResponse = e.getRestTestResponse(); + if (Strings.hasLength(catchParam) == false) { + fail(formatStatusCodeMessage(restTestResponse, "2xx")); + } else if (CATCHES.containsKey(catchParam)) { + assertStatusCode(restTestResponse); + } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) { + // the text of the error message matches regular expression + assertThat( + formatStatusCodeMessage(restTestResponse, "4xx|5xx"), + e.getResponseException().getResponse().getStatusLine().getStatusCode(), + greaterThanOrEqualTo(400) + ); + Object error = executionContext.response("error"); + assertThat("error was expected in the response", error, notNullValue()); + // remove delimiters from regex + String regex = catchParam.substring(1, catchParam.length() - 1); + assertThat("the error message was expected to match the provided regex but didn't", error.toString(), matches(regex)); + } else { + throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); + } + } + private static void appendBadHeaders(final StringBuilder sb, final List headers, final String message) { if (headers.isEmpty() == false) { sb.append(message).append(" [\n"); diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java index a38e34d7842d8..c2fa41a5241db 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; import java.util.function.Function; -import java.util.stream.Stream; /** * Run the ESQL yaml tests against the async esql endpoint with a 30 minute {@code wait_until_completion_timeout}. 
@@ -40,11 +39,11 @@ public static Iterable parameters() throws Exception { body.put("wait_for_completion_timeout", "30m"); } doSection.setApiCallSection(copy); - return Stream.of(doSection); + return doSection; }); } - public static Iterable parameters(Function> modify) throws Exception { + public static Iterable parameters(Function modify) throws Exception { List result = new ArrayList<>(); for (Object[] orig : ESClientYamlSuiteTestCase.createParameters()) { assert orig.length == 1; @@ -54,7 +53,7 @@ public static Iterable parameters(Function modifyExecutableSection(e, modify)).toList() + candidate.getTestSection().getExecutableSections().stream().map(e -> modifyExecutableSection(e, modify)).toList() ); result.add(new Object[] { new ClientYamlTestCandidate(candidate.getRestTestSuite(), modified) }); } catch (IllegalArgumentException e) { @@ -64,12 +63,9 @@ public static Iterable parameters(Function modifyExecutableSection( - ExecutableSection e, - Function> modify - ) { + private static ExecutableSection modifyExecutableSection(ExecutableSection e, Function modify) { if (false == (e instanceof DoSection)) { - return Stream.of(e); + return e; } DoSection doSection = (DoSection) e; String api = doSection.getApiCallSection().getApi(); @@ -78,7 +74,7 @@ private static Stream modifyExecutableSection( case "esql.async_query", "esql.async_query_get" -> throw new IllegalArgumentException( "The esql yaml tests can't contain async_query or async_query_get because we modify them on the fly and *add* those." ); - default -> Stream.of(e); + default -> e; }; } } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java index be2bfcb8a2787..34eb2421b0432 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java @@ -9,19 +9,22 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; -import org.elasticsearch.test.rest.yaml.section.ApiCallSection; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; +import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException; import org.elasticsearch.test.rest.yaml.section.DoSection; +import org.elasticsearch.test.rest.yaml.section.ExecutableSection; +import org.elasticsearch.xcontent.XContentLocation; +import java.io.IOException; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Stream; /** * Run the ESQL yaml tests async and then fetch the results with a long wait time. 
*/ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104294") public class EsqlClientYamlAsyncSubmitAndFetchIT extends AbstractEsqlClientYamlIT { public EsqlClientYamlAsyncSubmitAndFetchIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); @@ -29,34 +32,79 @@ public EsqlClientYamlAsyncSubmitAndFetchIT(final ClientYamlTestCandidate testCan @ParametersFactory public static Iterable parameters() throws Exception { - return EsqlClientYamlAsyncIT.parameters(doSection -> { - ApiCallSection copy = doSection.getApiCallSection().copyWithNewApi("esql.async_query"); - for (Map body : copy.getBodies()) { - body.put("wait_for_completion_timeout", "0ms"); - body.put("keep_on_completion", true); + return EsqlClientYamlAsyncIT.parameters(DoEsqlAsync::new); + } + + private static class DoEsqlAsync implements ExecutableSection { + private final DoSection original; + + private DoEsqlAsync(DoSection original) { + this.original = original; + } + + @Override + public XContentLocation getLocation() { + return original.getLocation(); + } + + @Override + public void execute(ClientYamlTestExecutionContext executionContext) throws IOException { + try { + // Start the query + List> bodies = original.getApiCallSection().getBodies().stream().map(m -> { + Map body = new HashMap<>(m); + if (randomBoolean()) { + /* + * Try to force the request to go async by setting the timeout to 0. + * This doesn't *actually* force the request async - if it finishes + * super duper faster it won't get async. But that's life. + */ + body.put("wait_for_completion_timeout", "0ms"); + } + return body; + }).toList(); + ClientYamlTestResponse startResponse = executionContext.callApi( + "esql.async_query", + original.getApiCallSection().getParams(), + bodies, + original.getApiCallSection().getHeaders(), + original.getApiCallSection().getNodeSelector() + ); + + String id = (String) startResponse.evaluate("id"); + boolean finishedEarly = id == null; + if (finishedEarly) { + /* + * If we finished early, make sure we don't have a "catch" + * param and expect and error. And make sure we match the + * warnings folks have asked for. + */ + original.failIfHasCatch(startResponse); + original.checkWarningHeaders(startResponse.getWarningHeaders(), testPath(executionContext)); + return; + } + + /* + * Ok, we didn't finish before the timeout. Fine, let's fetch the result. + */ + ClientYamlTestResponse fetchResponse = executionContext.callApi( + "esql.async_query_get", + Map.of("wait_for_completion_timeout", "30m", "id", id), + List.of(), + original.getApiCallSection().getHeaders(), + original.getApiCallSection().getNodeSelector() + ); + original.failIfHasCatch(fetchResponse); + original.checkWarningHeaders(fetchResponse.getWarningHeaders(), testPath(executionContext)); + } catch (ClientYamlTestResponseException e) { + original.checkResponseException(e, executionContext); } - doSection.setApiCallSection(copy); - - DoSection fetch = new DoSection(doSection.getLocation()); - fetch.setApiCallSection(new ApiCallSection("esql.async_query_get")); - fetch.getApiCallSection().addParam("wait_for_completion_timeout", "30m"); - fetch.getApiCallSection().addParam("id", "$body.id"); - - /* - * The request to start the query doesn't make warnings or errors so shift - * those to the fetch. 
 */
-            fetch.setExpectedWarningHeaders(doSection.getExpectedWarningHeaders());
-            fetch.setExpectedWarningHeadersRegex(doSection.getExpectedWarningHeadersRegex());
-            fetch.setAllowedWarningHeaders(doSection.getAllowedWarningHeaders());
-            fetch.setAllowedWarningHeadersRegex(doSection.getAllowedWarningHeadersRegex());
-            fetch.setCatch(doSection.getCatch());
-            doSection.setExpectedWarningHeaders(List.of());
-            doSection.setExpectedWarningHeadersRegex(List.of());
-            doSection.setAllowedWarningHeaders(List.of());
-            doSection.setAllowedWarningHeadersRegex(List.of());
-            doSection.setCatch(null);
-            return Stream.of(doSection, fetch);
-        });
+        }
+
+        private String testPath(ClientYamlTestExecutionContext executionContext) {
+            return executionContext.getClientYamlTestCandidate() != null
+                ? executionContext.getClientYamlTestCandidate().getTestPath()
+                : null;
+        }
     }
 }

From 2a79d781eb13da6d132ca4c15f1edf6a38e21a93 Mon Sep 17 00:00:00 2001
From: James Baiera
Date: Fri, 12 Jan 2024 12:55:40 -0500
Subject: [PATCH 11/95] Data streams fix failure store delete (#104281)

This PR adds any failure store indices to the list of indices to be deleted
when deleting a data stream.

---
 docs/changelog/104281.yaml | 5 ++
 .../DeleteDataStreamTransportAction.java | 1 +
 .../DeleteDataStreamTransportActionTests.java | 25 ++++++
 .../GetDataStreamsTransportActionTests.java | 1 +
 .../test/data_stream/10_basic.yml | 79 +++++++++++++++++++
 .../metadata/DataStreamTestHelper.java | 55 +++++++++++--
 6 files changed, 161 insertions(+), 5 deletions(-)
 create mode 100644 docs/changelog/104281.yaml

diff --git a/docs/changelog/104281.yaml b/docs/changelog/104281.yaml
new file mode 100644
index 0000000000000..087e91d83ab3b
--- /dev/null
+++ b/docs/changelog/104281.yaml
@@ -0,0 +1,5 @@
+pr: 104281
+summary: Data streams fix failure store delete
+area: Data streams
+type: bug
+issues: []
diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java
index e756ba32ec699..6e7528c470d49 100644
--- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java
+++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java
@@ -155,6 +155,7 @@ static ClusterState removeDataStream(
             DataStream dataStream = currentState.metadata().dataStreams().get(dataStreamName);
             assert dataStream != null;
             backingIndicesToRemove.addAll(dataStream.getIndices());
+            backingIndicesToRemove.addAll(dataStream.getFailureIndices());
         }

         // first delete the data streams and then the indices:
diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java
index 29c88b7f75463..a5c3b348b1f1b 100644
--- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java
+++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInProgressException;
 import org.elasticsearch.test.ESTestCase;
+import org.junit.Assume;

 import java.util.Collections;
 import java.util.List;
@@ -55,6 +56,30 @@ public void testDeleteDataStream() {
         }
} + public void testDeleteDataStreamWithFailureStore() { + Assume.assumeTrue(DataStream.isFailureStoreEnabled()); + + final String dataStreamName = "my-data-stream"; + final List otherIndices = randomSubsetOf(List.of("foo", "bar", "baz")); + + ClusterState cs = DataStreamTestHelper.getClusterStateWithDataStreams( + List.of(new Tuple<>(dataStreamName, 2)), + otherIndices, + System.currentTimeMillis(), + Settings.EMPTY, + 1, + false, + true + ); + DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); + assertThat(newState.metadata().dataStreams().size(), equalTo(0)); + assertThat(newState.metadata().indices().size(), equalTo(otherIndices.size())); + for (String indexName : otherIndices) { + assertThat(newState.metadata().indices().get(indexName).getIndex().getName(), equalTo(indexName)); + } + } + public void testDeleteMultipleDataStreams() { String[] dataStreamNames = { "foo", "bar", "baz", "eggplant" }; ClusterState cs = DataStreamTestHelper.getClusterStateWithDataStreams( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java index c24d386dcb26e..637fb44affb6f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/GetDataStreamsTransportActionTests.java @@ -215,6 +215,7 @@ public void testGetTimeSeriesMixedDataStream() { instant.toEpochMilli(), Settings.EMPTY, 0, + false, false ); DataStreamTestHelper.getClusterStateWithDataStream(mBuilder, dataStream1, List.of(new Tuple<>(twoHoursAgo, twoHoursAhead))); diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 6496930764ab8..f5837f6d8c286 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -303,6 +303,11 @@ setup: name: failure-data-stream2 - is_true: acknowledged + - do: + indices.delete_index_template: + name: my-template4 + - is_true: acknowledged + --- "Create data stream with invalid name": - skip: @@ -530,6 +535,80 @@ setup: indices.get: index: $idx0name +--- +"Delete data stream with failure stores": + - skip: + version: " - 8.11.99" + reason: "data streams only supported in 8.12+" + + - do: + allowed_warnings: + - "index template [my-template4] has index patterns [failure-data-stream1, failure-data-stream2] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template4] will take precedence during new index creation" + indices.put_index_template: + name: my-template4 + body: + index_patterns: [ failure-data-stream1 ] + data_stream: + failure_store: true + + - do: + indices.create_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + indices.create: + index: test_index + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + + # save the backing index names for later use + - do: + indices.get_data_stream: + name: failure-data-stream1 + + - set: { 
data_streams.0.indices.0.index_name: idx0name } + - set: { data_streams.0.failure_indices.0.index_name: fs0name } + + - do: + indices.get: + index: ['.ds-failure-data-stream1-*000001', 'test_index'] + + - is_true: test_index.settings + - is_true: .$idx0name.settings + + - do: + indices.get_data_stream: {} + - match: { data_streams.0.name: failure-data-stream1 } + - match: { data_streams.0.timestamp_field.name: '@timestamp' } + - match: { data_streams.0.generation: 1 } + - length: { data_streams.0.indices: 1 } + - match: { data_streams.0.indices.0.index_name: '/\.ds-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - length: { data_streams.0.failure_indices: 1 } + - match: { data_streams.0.failure_indices.0.index_name: '/\.fs-failure-data-stream1-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + + - do: + indices.delete_data_stream: + name: failure-data-stream1 + - is_true: acknowledged + + - do: + catch: missing + indices.get: + index: $idx0name + + - do: + catch: missing + indices.get: + index: $fs0name + + - do: + indices.delete_index_template: + name: my-template4 + - is_true: acknowledged + --- "Delete data stream missing behaviour": - skip: diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 5c5123e03454f..d0b30bff92f3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -68,6 +68,7 @@ import static org.elasticsearch.cluster.metadata.DataStream.BACKING_INDEX_PREFIX; import static org.elasticsearch.cluster.metadata.DataStream.DATE_FORMATTER; import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; +import static org.elasticsearch.cluster.metadata.DataStream.getDefaultFailureStoreName; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.elasticsearch.test.ESTestCase.generateRandomStringArray; import static org.elasticsearch.test.ESTestCase.randomAlphaOfLength; @@ -111,7 +112,19 @@ public static DataStream newInstance( boolean replicated, @Nullable DataStreamLifecycle lifecycle ) { - return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, lifecycle, false, List.of()); + return newInstance(name, indices, generation, metadata, replicated, lifecycle, List.of()); + } + + public static DataStream newInstance( + String name, + List indices, + long generation, + Map metadata, + boolean replicated, + @Nullable DataStreamLifecycle lifecycle, + List failureStores + ) { + return new DataStream(name, indices, generation, metadata, false, replicated, false, false, null, lifecycle, false, failureStores); } public static String getLegacyDefaultBackingIndexName( @@ -318,9 +331,21 @@ public static ClusterState getClusterStateWithDataStreams( Settings settings, int replicas, boolean replicated + ) { + return getClusterStateWithDataStreams(dataStreams, indexNames, currentTime, settings, replicas, replicated, false); + } + + public static ClusterState getClusterStateWithDataStreams( + List> dataStreams, + List indexNames, + long currentTime, + Settings settings, + int replicas, + boolean replicated, + boolean storeFailures ) { Metadata.Builder builder = Metadata.builder(); - getClusterStateWithDataStreams(builder, dataStreams, indexNames, currentTime, settings, replicas, replicated); + 
getClusterStateWithDataStreams(builder, dataStreams, indexNames, currentTime, settings, replicas, replicated, storeFailures);
         return ClusterState.builder(new ClusterName("_name")).metadata(builder).build();
     }

@@ -331,13 +356,16 @@ public static void getClusterStateWithDataStreams(
         long currentTime,
         Settings settings,
         int replicas,
-        boolean replicated
+        boolean replicated,
+        boolean storeFailures
     ) {
         builder.put(
             "template_1",
             ComposableIndexTemplate.builder()
                 .indexPatterns(List.of("*"))
-                .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
+                .dataStreamTemplate(
+                    new ComposableIndexTemplate.DataStreamTemplate(false, false, DataStream.isFailureStoreEnabled() && storeFailures)
+                )
                 .build()
         );

@@ -351,12 +379,29 @@ public static void getClusterStateWithDataStreams(
             }
             allIndices.addAll(backingIndices);

+            List failureStores = new ArrayList<>();
+            if (DataStream.isFailureStoreEnabled() && storeFailures) {
+                for (int failureStoreNumber = 1; failureStoreNumber <= dsTuple.v2(); failureStoreNumber++) {
+                    failureStores.add(
+                        createIndexMetadata(
+                            getDefaultFailureStoreName(dsTuple.v1(), failureStoreNumber, currentTime),
+                            true,
+                            settings,
+                            replicas
+                        )
+                    );
+                }
+                allIndices.addAll(failureStores);
+            }
+
             DataStream ds = DataStreamTestHelper.newInstance(
                 dsTuple.v1(),
                 backingIndices.stream().map(IndexMetadata::getIndex).collect(Collectors.toList()),
                 dsTuple.v2(),
                 null,
-                replicated
+                replicated,
+                null,
+                failureStores.stream().map(IndexMetadata::getIndex).collect(Collectors.toList())
             );
             builder.put(ds);
         }

From a84ce721f6a9e66b111c58b7ac6169b5aef6ab4d Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Fri, 12 Jan 2024 13:03:12 -0500
Subject: [PATCH 12/95] ESQL: Fix bug in topn tests (#104210)

This fixes a bug in the topn tests which failed because we tried to sort on
a geo field. Geo fields aren't valid sort keys but the test is randomized and
rarely picks them. This stops it from picking them.

Most of the text of this change is actually just me making debugging easier.
Closes #104167 --- .../operator/topn/KeyExtractorForBoolean.java | 41 ++++++++++------- .../topn/KeyExtractorForBytesRef.java | 41 ++++++++++------- .../operator/topn/KeyExtractorForDouble.java | 41 ++++++++++------- .../operator/topn/KeyExtractorForInt.java | 41 ++++++++++------- .../operator/topn/KeyExtractorForLong.java | 41 ++++++++++------- .../topn/ResultBuilderForBoolean.java | 4 ++ .../topn/ResultBuilderForBytesRef.java | 4 ++ .../operator/topn/ResultBuilderForDouble.java | 4 ++ .../operator/topn/ResultBuilderForInt.java | 4 ++ .../operator/topn/ResultBuilderForLong.java | 4 ++ .../topn/ValueExtractorForBoolean.java | 4 ++ .../topn/ValueExtractorForBytesRef.java | 4 ++ .../topn/ValueExtractorForDouble.java | 4 ++ .../operator/topn/ValueExtractorForInt.java | 4 ++ .../operator/topn/ValueExtractorForLong.java | 4 ++ .../operator/topn/X-KeyExtractor.java.st | 45 ++++++++++++------- .../operator/topn/X-ResultBuilder.java.st | 4 ++ .../operator/topn/X-ValueExtractor.java.st | 4 ++ .../operator/topn/TopNOperatorTests.java | 30 ++++++++----- 19 files changed, 226 insertions(+), 102 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBoolean.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBoolean.java index 40fe7ffdde661..b537b6d96fc9d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBoolean.java @@ -11,20 +11,26 @@ import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link BooleanBlock}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. + */ abstract class KeyExtractorForBoolean implements KeyExtractor { static KeyExtractorForBoolean extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, BooleanBlock block) { BooleanVector v = block.asVector(); if (v != null) { - return new KeyExtractorForBoolean.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorForBoolean.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorForBoolean.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForBoolean.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForBoolean.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForBoolean.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorForBoolean.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForBoolean.MaxForUnordered(encoder, nul, nonNul, block); + ? 
new KeyExtractorForBoolean.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForBoolean.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } private final byte nul; @@ -47,10 +53,15 @@ protected final int nul(BreakingBytesRefBuilder key) { return 1; } - static class ForVector extends KeyExtractorForBoolean { + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForBoolean%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); + } + + static class FromVector extends KeyExtractorForBoolean { private final BooleanVector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, BooleanVector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, BooleanVector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -61,10 +72,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForAscending extends KeyExtractorForBoolean { + static class MinFromAscendingBlock extends KeyExtractorForBoolean { private final BooleanBlock block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -78,10 +89,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForAscending extends KeyExtractorForBoolean { + static class MaxFromAscendingBlock extends KeyExtractorForBoolean { private final BooleanBlock block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -95,10 +106,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForUnordered extends KeyExtractorForBoolean { + static class MinFromUnorderedBlock extends KeyExtractorForBoolean { private final BooleanBlock block; - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -120,10 +131,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForUnordered extends KeyExtractorForBoolean { + static class MaxFromUnorderedBlock extends KeyExtractorForBoolean { private final BooleanBlock block; - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, BooleanBlock block) { super(encoder, nul, nonNul); this.block = block; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBytesRef.java index 2f546a46aaeaf..bf07905019dad 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForBytesRef.java @@ -12,20 +12,26 @@ import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link BytesRefBlock}s. + * This class is generated. 
Edit {@code X-KeyExtractor.java.st} instead. + */ abstract class KeyExtractorForBytesRef implements KeyExtractor { static KeyExtractorForBytesRef extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, BytesRefBlock block) { BytesRefVector v = block.asVector(); if (v != null) { - return new KeyExtractorForBytesRef.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorForBytesRef.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorForBytesRef.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForBytesRef.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForBytesRef.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForBytesRef.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorForBytesRef.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForBytesRef.MaxForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForBytesRef.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForBytesRef.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } private final TopNEncoder encoder; @@ -49,10 +55,15 @@ protected final int nul(BreakingBytesRefBuilder key) { return 1; } - static class ForVector extends KeyExtractorForBytesRef { + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForBytesRef%s(%s, %s, %s)", getClass().getSimpleName(), encoder, nul, nonNul); + } + + static class FromVector extends KeyExtractorForBytesRef { private final BytesRefVector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, BytesRefVector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, BytesRefVector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -63,10 +74,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForAscending extends KeyExtractorForBytesRef { + static class MinFromAscendingBlock extends KeyExtractorForBytesRef { private final BytesRefBlock block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -80,10 +91,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForAscending extends KeyExtractorForBytesRef { + static class MaxFromAscendingBlock extends KeyExtractorForBytesRef { private final BytesRefBlock block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -97,12 +108,12 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForUnordered extends KeyExtractorForBytesRef { + static class MinFromUnorderedBlock extends KeyExtractorForBytesRef { private final BytesRefBlock block; private final BytesRef minScratch = new BytesRef(); - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -128,12 +139,12 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForUnordered extends KeyExtractorForBytesRef { + static class 
MaxFromUnorderedBlock extends KeyExtractorForBytesRef { private final BytesRefBlock block; private final BytesRef maxScratch = new BytesRef(); - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, BytesRefBlock block) { super(encoder, nul, nonNul); this.block = block; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForDouble.java index 5e821b9e24db5..03477a65a3cde 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForDouble.java @@ -11,20 +11,26 @@ import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link DoubleBlock}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. + */ abstract class KeyExtractorForDouble implements KeyExtractor { static KeyExtractorForDouble extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, DoubleBlock block) { DoubleVector v = block.asVector(); if (v != null) { - return new KeyExtractorForDouble.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorForDouble.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorForDouble.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForDouble.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForDouble.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForDouble.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorForDouble.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForDouble.MaxForUnordered(encoder, nul, nonNul, block); + ? 
new KeyExtractorForDouble.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForDouble.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } private final byte nul; @@ -47,10 +53,15 @@ protected final int nul(BreakingBytesRefBuilder key) { return 1; } - static class ForVector extends KeyExtractorForDouble { + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForDouble%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); + } + + static class FromVector extends KeyExtractorForDouble { private final DoubleVector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, DoubleVector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, DoubleVector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -61,10 +72,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForAscending extends KeyExtractorForDouble { + static class MinFromAscendingBlock extends KeyExtractorForDouble { private final DoubleBlock block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -78,10 +89,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForAscending extends KeyExtractorForDouble { + static class MaxFromAscendingBlock extends KeyExtractorForDouble { private final DoubleBlock block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -95,10 +106,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForUnordered extends KeyExtractorForDouble { + static class MinFromUnorderedBlock extends KeyExtractorForDouble { private final DoubleBlock block; - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -119,10 +130,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForUnordered extends KeyExtractorForDouble { + static class MaxFromUnorderedBlock extends KeyExtractorForDouble { private final DoubleBlock block; - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, DoubleBlock block) { super(encoder, nul, nonNul); this.block = block; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForInt.java index d4269a622f098..5f45df662efdd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForInt.java @@ -11,20 +11,26 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link IntBlock}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. 
+ */ abstract class KeyExtractorForInt implements KeyExtractor { static KeyExtractorForInt extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, IntBlock block) { IntVector v = block.asVector(); if (v != null) { - return new KeyExtractorForInt.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorForInt.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorForInt.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForInt.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForInt.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForInt.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorForInt.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForInt.MaxForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForInt.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForInt.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } private final byte nul; @@ -47,10 +53,15 @@ protected final int nul(BreakingBytesRefBuilder key) { return 1; } - static class ForVector extends KeyExtractorForInt { + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForInt%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); + } + + static class FromVector extends KeyExtractorForInt { private final IntVector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, IntVector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, IntVector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -61,10 +72,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForAscending extends KeyExtractorForInt { + static class MinFromAscendingBlock extends KeyExtractorForInt { private final IntBlock block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -78,10 +89,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForAscending extends KeyExtractorForInt { + static class MaxFromAscendingBlock extends KeyExtractorForInt { private final IntBlock block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -95,10 +106,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForUnordered extends KeyExtractorForInt { + static class MinFromUnorderedBlock extends KeyExtractorForInt { private final IntBlock block; - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -119,10 +130,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForUnordered extends KeyExtractorForInt { + static class MaxFromUnorderedBlock extends KeyExtractorForInt { private final IntBlock block; - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, IntBlock block) { super(encoder, nul, nonNul); this.block = block; } diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForLong.java index 6a200efff529d..e61ab644ecfe1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/KeyExtractorForLong.java @@ -11,20 +11,26 @@ import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link LongBlock}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. + */ abstract class KeyExtractorForLong implements KeyExtractor { static KeyExtractorForLong extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, LongBlock block) { LongVector v = block.asVector(); if (v != null) { - return new KeyExtractorForLong.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorForLong.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorForLong.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForLong.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForLong.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForLong.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorForLong.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorForLong.MaxForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorForLong.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorForLong.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } private final byte nul; @@ -47,10 +53,15 @@ protected final int nul(BreakingBytesRefBuilder key) { return 1; } - static class ForVector extends KeyExtractorForLong { + @Override + public final String toString() { + return String.format(Locale.ROOT, "KeyExtractorForLong%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); + } + + static class FromVector extends KeyExtractorForLong { private final LongVector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, LongVector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, LongVector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -61,10 +72,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MinForAscending extends KeyExtractorForLong { + static class MinFromAscendingBlock extends KeyExtractorForLong { private final LongBlock block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -78,10 +89,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForAscending extends KeyExtractorForLong { + static class MaxFromAscendingBlock extends KeyExtractorForLong { private final LongBlock block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -95,10 +106,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } 
- static class MinForUnordered extends KeyExtractorForLong { + static class MinFromUnorderedBlock extends KeyExtractorForLong { private final LongBlock block; - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { super(encoder, nul, nonNul); this.block = block; } @@ -119,10 +130,10 @@ public int writeKey(BreakingBytesRefBuilder key, int position) { } } - static class MaxForUnordered extends KeyExtractorForLong { + static class MaxFromUnorderedBlock extends KeyExtractorForLong { private final LongBlock block; - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, LongBlock block) { super(encoder, nul, nonNul); this.block = block; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java index 184ef69f00d85..e6b8d70a63ed7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; +/** + * Builds the resulting {@link BooleanBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. + */ class ResultBuilderForBoolean implements ResultBuilder { private final BooleanBlock.Builder builder; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java index 4008f7fbd924b..637cddb9b3089 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; +/** + * Builds the resulting {@link BytesRefBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. + */ class ResultBuilderForBytesRef implements ResultBuilder { private final BytesRefBlock.Builder builder; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java index f06a1e814ef43..e7119ee714c34 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DoubleBlock; +/** + * Builds the resulting {@link DoubleBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. 
+ */ class ResultBuilderForDouble implements ResultBuilder { private final DoubleBlock.Builder builder;
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java index 848bbf9ab6a0a..ad1236975141b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.IntBlock; +/** + * Builds the resulting {@link IntBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. + */ class ResultBuilderForInt implements ResultBuilder { private final IntBlock.Builder builder;
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java index b4361ad83180a..cad392c3d525c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.LongBlock; +/** + * Builds the resulting {@link LongBlock} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. + */ class ResultBuilderForLong implements ResultBuilder { private final LongBlock.Builder builder;
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBoolean.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBoolean.java index b13dd3ce7f2b0..535618da01727 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBoolean.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link BooleanBlock}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead.
+ */ abstract class ValueExtractorForBoolean implements ValueExtractor { static ValueExtractorForBoolean extractorFor(TopNEncoder encoder, boolean inKey, BooleanBlock block) { BooleanVector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBytesRef.java index 65c5da5737a59..70065fd544759 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForBytesRef.java @@ -12,6 +12,10 @@ import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link BytesRefBlock}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead. + */ abstract class ValueExtractorForBytesRef implements ValueExtractor { static ValueExtractorForBytesRef extractorFor(TopNEncoder encoder, boolean inKey, BytesRefBlock block) { BytesRefVector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForDouble.java index d20f2bf53972a..b504196dff7e1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForDouble.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link DoubleBlock}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead. + */ abstract class ValueExtractorForDouble implements ValueExtractor { static ValueExtractorForDouble extractorFor(TopNEncoder encoder, boolean inKey, DoubleBlock block) { DoubleVector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForInt.java index d20368f874e8e..485d9f4bb8559 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForInt.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link IntBlock}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead.
+ */ abstract class ValueExtractorForInt implements ValueExtractor { static ValueExtractorForInt extractorFor(TopNEncoder encoder, boolean inKey, IntBlock block) { IntVector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForLong.java index b7b566b3eda3d..4a244746bd0d3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ValueExtractorForLong.java @@ -11,6 +11,10 @@ import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link LongBlock}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead. + */ abstract class ValueExtractorForLong implements ValueExtractor { static ValueExtractorForLong extractorFor(TopNEncoder encoder, boolean inKey, LongBlock block) { LongVector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-KeyExtractor.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-KeyExtractor.java.st index dbe0b23af93bb..90a4044a10a93 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-KeyExtractor.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-KeyExtractor.java.st @@ -14,20 +14,26 @@ import org.elasticsearch.compute.data.$Type$Block; import org.elasticsearch.compute.data.$Type$Vector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +import java.util.Locale; + +/** + * Extracts sort keys for top-n from their {@link $Type$Block}s. + * This class is generated. Edit {@code X-KeyExtractor.java.st} instead. + */ abstract class KeyExtractorFor$Type$ implements KeyExtractor { static KeyExtractorFor$Type$ extractorFor(TopNEncoder encoder, boolean ascending, byte nul, byte nonNul, $Type$Block block) { $Type$Vector v = block.asVector(); if (v != null) { - return new KeyExtractorFor$Type$.ForVector(encoder, nul, nonNul, v); + return new KeyExtractorFor$Type$.FromVector(encoder, nul, nonNul, v); } if (ascending) { return block.mvSortedAscending() - ? new KeyExtractorFor$Type$.MinForAscending(encoder, nul, nonNul, block) - : new KeyExtractorFor$Type$.MinForUnordered(encoder, nul, nonNul, block); + ? new KeyExtractorFor$Type$.MinFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorFor$Type$.MinFromUnorderedBlock(encoder, nul, nonNul, block); } return block.mvSortedAscending() - ? new KeyExtractorFor$Type$.MaxForAscending(encoder, nul, nonNul, block) - : new KeyExtractorFor$Type$.MaxForUnordered(encoder, nul, nonNul, block); + ?
new KeyExtractorFor$Type$.MaxFromAscendingBlock(encoder, nul, nonNul, block) + : new KeyExtractorFor$Type$.MaxFromUnorderedBlock(encoder, nul, nonNul, block); } $if(BytesRef)$ @@ -65,10 +71,19 @@ $endif$ return 1; } - static class ForVector extends KeyExtractorFor$Type$ { + @Override + public final String toString() { +$if(BytesRef)$ + return String.format(Locale.ROOT, "KeyExtractorFor$Type$%s(%s, %s, %s)", getClass().getSimpleName(), encoder, nul, nonNul); +$else$ + return String.format(Locale.ROOT, "KeyExtractorFor$Type$%s(%s, %s)", getClass().getSimpleName(), nul, nonNul); +$endif$ + } + + static class FromVector extends KeyExtractorFor$Type$ { private final $Type$Vector vector; - ForVector(TopNEncoder encoder, byte nul, byte nonNul, $Type$Vector vector) { + FromVector(TopNEncoder encoder, byte nul, byte nonNul, $Type$Vector vector) { super(encoder, nul, nonNul); this.vector = vector; } @@ -83,10 +98,10 @@ $endif$ } } - static class MinForAscending extends KeyExtractorFor$Type$ { + static class MinFromAscendingBlock extends KeyExtractorFor$Type$ { private final $Type$Block block; - MinForAscending(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { + MinFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { super(encoder, nul, nonNul); this.block = block; } @@ -104,10 +119,10 @@ $endif$ } } - static class MaxForAscending extends KeyExtractorFor$Type$ { + static class MaxFromAscendingBlock extends KeyExtractorFor$Type$ { private final $Type$Block block; - MaxForAscending(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { + MaxFromAscendingBlock(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { super(encoder, nul, nonNul); this.block = block; } @@ -125,14 +140,14 @@ $endif$ } } - static class MinForUnordered extends KeyExtractorFor$Type$ { + static class MinFromUnorderedBlock extends KeyExtractorFor$Type$ { private final $Type$Block block; $if(BytesRef)$ private final BytesRef minScratch = new BytesRef(); $endif$ - MinForUnordered(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { + MinFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { super(encoder, nul, nonNul); this.block = block; } @@ -173,14 +188,14 @@ $endif$ } } - static class MaxForUnordered extends KeyExtractorFor$Type$ { + static class MaxFromUnorderedBlock extends KeyExtractorFor$Type$ { private final $Type$Block block; $if(BytesRef)$ private final BytesRef maxScratch = new BytesRef(); $endif$ - MaxForUnordered(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { + MaxFromUnorderedBlock(TopNEncoder encoder, byte nul, byte nonNul, $Type$Block block) { super(encoder, nul, nonNul); this.block = block; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ResultBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ResultBuilder.java.st index 49bece755820f..4858dba3b4de7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ResultBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ResultBuilder.java.st @@ -11,6 +11,10 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.$Type$Block; +/** + * Builds the resulting {@link $Type$Block} for some column in a top-n. + * This class is generated. Edit {@code X-ResultBuilder.java.st} instead. 
+ */ class ResultBuilderFor$Type$ implements ResultBuilder { private final $Type$Block.Builder builder;
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ValueExtractor.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ValueExtractor.java.st index 0e25e44834c17..ef80df5c334f2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ValueExtractor.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/topn/X-ValueExtractor.java.st @@ -14,6 +14,10 @@ import org.elasticsearch.compute.data.$Type$Block; import org.elasticsearch.compute.data.$Type$Vector; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; +/** + * Extracts non-sort-key values for top-n from their {@link $Type$Block}s. + * This class is generated. Edit {@code X-ValueExtractor.java.st} instead. + */ abstract class ValueExtractorFor$Type$ implements ValueExtractor { static ValueExtractorFor$Type$ extractorFor(TopNEncoder encoder, boolean inKey, $Type$Block block) { $Type$Vector vector = block.asVector();
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java index 22b17190c0355..10fecd122672a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java @@ -936,7 +936,6 @@ private void assertSortingOnMV( assertMap(actualValues, matchesList(List.of(expectedValues.subList(0, topCount)))); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104167") public void testRandomMultiValuesTopN() { DriverContext driverContext = driverContext(); int rows = randomIntBetween(50, 100); @@ -947,6 +946,7 @@ public void testRandomMultiValuesTopN() { Set uniqueOrders = new LinkedHashSet<>(sortingByColumns); List>> expectedValues = new ArrayList<>(rows); List blocks = new ArrayList<>(blocksCount); + boolean[] validSortKeys = new boolean[blocksCount]; List elementTypes = new ArrayList<>(blocksCount); List encoders = new ArrayList<>(blocksCount); @@ -960,6 +960,7 @@ public void testRandomMultiValuesTopN() { () -> randomFrom(ElementType.values()) ); elementTypes.add(e); + validSortKeys[type] = true; try (Block.Builder builder = e.newBlockBuilder(rows, driverContext().blockFactory())) { List previousValue = null; Function randomValueSupplier = (blockType) -> randomValue(blockType); if (rarely()) { randomValueSupplier = switch (randomInt(2)) { case 0 -> { - // use the right BytesRef encoder (don't touch the bytes) + // Simulate ips encoders.add(TopNEncoder.IP); - // deal with IP fields (BytesRef block) like ES does and properly encode the ip addresses yield (blockType) -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); } case 1 -> { - // use the right BytesRef encoder (don't touch the bytes) + // Simulate version fields encoders.add(TopNEncoder.VERSION); - // create a valid Version yield (blockType) -> randomVersion().toBytesRef(); } - default -> { - // use the right BytesRef encoder (don't touch the bytes) + case 2 -> { + // Simulate geo_shape and geo_point encoders.add(DEFAULT_UNSORTABLE); - // create a valid geo_point + validSortKeys[type] = false;
yield (blockType) -> randomPointAsWKB(); } + default -> throw new UnsupportedOperationException(); }; } else { encoders.add(UTF8); @@ -1033,10 +1033,16 @@ public void testRandomMultiValuesTopN() { } } - // simulate the LogicalPlanOptimizer.PruneRedundantSortClauses by eliminating duplicate sorting columns (same column, same asc/desc, - // same "nulls" handling) - while (uniqueOrders.size() < sortingByColumns) { - int column = randomIntBetween(0, blocksCount - 1); + /* + * Build sort keys, making sure not to include duplicates. This could + * build fewer than the desired sort columns, but it's more important + * to make sure that we don't include dups + * (to simulate LogicalPlanOptimizer.PruneRedundantSortClauses) and + * not to include sort keys that simulate geo objects. Those aren't + * sortable at all. + */ + for (int i = 0; i < sortingByColumns; i++) { + int column = randomValueOtherThanMany(c -> false == validSortKeys[c], () -> randomIntBetween(0, blocksCount - 1)); uniqueOrders.add(new TopNOperator.SortOrder(column, randomBoolean(), randomBoolean())); } From 97d0c8c07a1c6a1108aa511d266928f59cc6a219 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 12 Jan 2024 15:36:14 -0500 Subject: [PATCH 13/95] ESQL: Fix error test on windows (#104340) This fixes a test on windows - the error message contains the platform local line endings because it comes from the jvm. Closes #104296 Closes #104245 --- .../function/scalar/string/ReplaceTests.java | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java index 60268b9e27764..6c6500bfc333d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java @@ -80,7 +80,7 @@ public static Iterable parameters() { ) ); - suppliers.add(new TestCaseSupplier(List.of(DataTypes.KEYWORD, DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { + suppliers.add(new TestCaseSupplier("syntax error", List.of(DataTypes.KEYWORD, DataTypes.KEYWORD, DataTypes.KEYWORD), () -> { String text = randomAlphaOfLength(10); String invalidRegex = "["; String newStr = randomAlphaOfLength(5); @@ -94,8 +94,16 @@ public static Iterable parameters() { DataTypes.KEYWORD, equalTo(null) ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.") - .withWarning("Line -1:-1: java.util.regex.PatternSyntaxException: Unclosed character class near index 0\n[\n^") - .withFoldingException(PatternSyntaxException.class, "Unclosed character class near index 0\n[\n^"); + .withWarning( + "Line -1:-1: java.util.regex.PatternSyntaxException: Unclosed character class near index 0\n[\n^".replaceAll( + "\n", + System.lineSeparator() + ) + ) + .withFoldingException( + PatternSyntaxException.class, + "Unclosed character class near index 0\n[\n^".replaceAll("\n", System.lineSeparator()) + ); })); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); }
From 3da01e0a1690bb1c304b8237da918d4e2a933889 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 12 Jan 2024 15:37:52 -0500 Subject: [PATCH 14/95] ESQL: Fix old version tests (#104333)
This weakens an assertion in the ESQL rolling upgrade tests so they'll pass against older versions of Elasticsearch. Apparently the warning message changed. There isn't a good reason to be so strict about the assertion anyway.
Closes #104101
--- .../xpack/restart/FullClusterRestartIT.java | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 6854035281670..4234c8e7913ba 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.UpdateForV9; -import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.test.StreamsUtils; @@ -996,7 +995,6 @@ public void testDataStreams() throws Exception { /** * Tests that a single document survives. Super basic smoke test.
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104101") public void testDisableFieldNameField() throws IOException { assumeTrue("can only disable field names field before 8.0", Version.fromString(getOldClusterVersion()).before(Version.V_8_0_0)); String docLocation = "/nofnf/_doc/1"; @@ -1023,10 +1021,11 @@ public void testDisableFieldNameField() throws IOException { } } }"""); - createIndex.setOptions( - RequestOptions.DEFAULT.toBuilder() - .setWarningsHandler(warnings -> false == warnings.equals(List.of(FieldNamesFieldMapper.ENABLED_DEPRECATION_MESSAGE))) - ); + createIndex.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> switch (warnings.size()) { + case 0 -> false; // old versions don't return a warning + case 1 -> false == warnings.get(0).contains("_field_names"); + default -> true; + })); client().performRequest(createIndex); Request createDoc = new Request("PUT", docLocation);
From 63b3e66fdf30fe57c2a433848bdef425d850ed2b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 12 Jan 2024 13:46:05 -0800 Subject: [PATCH 15/95] AwaitsFix #104343
--- .../allocation/allocator/DesiredBalanceComputerTests.java | 1 + 1 file changed, 1 insertion(+)
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java index 9fe168074f41e..1c2b35fe050f5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceComputerTests.java @@ -581,6 +581,7 @@ public void testAppliesMoveCommands() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104343") public void testDesiredBalanceShouldConvergeInABigCluster() { var nodes = randomIntBetween(3, 7); var nodeIds = new ArrayList(nodes);
From 149f4a1376819fc01107fcbe5ed9ad5278051bf3 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Fri, 12 Jan 2024 14:48:01 -0800 Subject: [PATCH 16/95] AwaitsFix #103108
--- .../xpack/ml/integration/MlDistributedFailureIT.java | 1 + 1 file changed, 1 insertion(+)
diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 33fd7c108863b..942729bb81c64 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -541,6 +541,7 @@ public void testClusterWithTwoMlNodes_RunsDatafeed_GivenOriginalNodeGoesDown() t }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103108") public void testClusterWithTwoMlNodes_StopsDatafeed_GivenJobFailsOnReassign() throws Exception { internalCluster().ensureAtMostNumDataNodes(0); logger.info("Starting dedicated master node...");
From 3c6ab3aba6f3c42326229571edb31c195b10579a Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Sat, 13 Jan 2024 12:49:39 +0100 Subject: [PATCH 17/95] Increase stateless refresh thread pool (#104332)
The refresh thread pool is sized too small for the current state of stateless, so this change increases it.
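As a minimal sketch of the sizing change (the `refreshThreads` helper here is hypothetical; the bounds mirror the `halfProcMaxAt10` value that `ThreadPool` already computes, and `DiscoveryNode.isStateless` is the check used in the diff below):

```java
// Hypothetical helper illustrating the new refresh pool sizing; not the actual ThreadPool code.
static int refreshThreads(boolean stateless, int allocatedProcessors) {
    // Old sizing for all nodes: half the allocated processors, bounded to [1, 10].
    int halfProcMaxAt10 = Math.min(10, Math.max(1, allocatedProcessors / 2));
    // Stateless nodes now get one refresh thread per allocated processor instead.
    return stateless ? allocatedProcessors : halfProcMaxAt10;
}
```

On an 8-processor stateless node, for example, this grows the maximum pool size from 4 to 8 threads.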
Relates ES-7633 and ES-7631 --- .../main/java/org/elasticsearch/threadpool/ThreadPool.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index bfcd8c8a396f5..17cafaee19bb4 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -228,7 +229,9 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui new ScalingExecutorBuilder(Names.MANAGEMENT, 1, boundedBy(allocatedProcessors, 1, 5), TimeValue.timeValueMinutes(5), false) ); builders.put(Names.FLUSH, new ScalingExecutorBuilder(Names.FLUSH, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5), false)); - builders.put(Names.REFRESH, new ScalingExecutorBuilder(Names.REFRESH, 1, halfProcMaxAt10, TimeValue.timeValueMinutes(5), false)); + // TODO: remove (or refine) this temporary stateless custom refresh pool sizing once ES-7631 is solved. + final int refreshThreads = DiscoveryNode.isStateless(settings) ? allocatedProcessors : halfProcMaxAt10; + builders.put(Names.REFRESH, new ScalingExecutorBuilder(Names.REFRESH, 1, refreshThreads, TimeValue.timeValueMinutes(5), false)); builders.put(Names.WARMER, new ScalingExecutorBuilder(Names.WARMER, 1, halfProcMaxAt5, TimeValue.timeValueMinutes(5), false)); final int maxSnapshotCores = getMaxSnapshotThreadPoolSize(allocatedProcessors); builders.put(Names.SNAPSHOT, new ScalingExecutorBuilder(Names.SNAPSHOT, 1, maxSnapshotCores, TimeValue.timeValueMinutes(5), false)); From 87e6b206c021d6153a89f7f8d1e844b1f3f5441b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Sat, 13 Jan 2024 10:26:44 -0800 Subject: [PATCH 18/95] AwaitsFix #104348 --- .../resources/rest-api-spec/test/data_stream/10_basic.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index f5837f6d8c286..22b541425b74f 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -538,8 +538,10 @@ setup: --- "Delete data stream with failure stores": - skip: - version: " - 8.11.99" - reason: "data streams only supported in 8.12+" + # version: " - 8.11.99" + # reason: "data streams only supported in 8.12+" + version: all + reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/104348 - do: allowed_warnings: From ba3d9c6de46331f52f75a9212d609dd530d3a71b Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Sat, 13 Jan 2024 10:34:49 -0800 Subject: [PATCH 19/95] AwaitsFix #104349 --- .../org/elasticsearch/index/shard/ShardSplittingQueryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java index 851ad18500add..0895d680046c9 100644 --- 
a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -68,6 +68,7 @@ public void testSplitOnID() throws IOException { dir.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104349") public void testSplitOnRouting() throws IOException { SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); Directory dir = newFSDirectory(createTempDir());
From 4d7e0ec1ef195480a7dde39a9876ecde875b1bdc Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 13 Jan 2024 15:03:00 -0800 Subject: [PATCH 20/95] Bump min target page size to 32 (#104335)
The current MIN_TARGET_PAGE_SIZE is set to 10, which may be too low. I think most of the optimizations in ESQL are focused on processing rows rather than pages. The overhead of processing many pages can be significant in some cases. For instance, the execution time of HeapAttackIT#testGroupOnManyLongs decreased from 52 seconds to 28 seconds when I increased MIN_TARGET_PAGE_SIZE from 10 to 32. Therefore, I propose raising the MIN_TARGET_PAGE_SIZE to 32.
--- .../main/java/org/elasticsearch/compute/operator/Operator.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java index 63dbdf2be09bf..fd6589bf5a913 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/Operator.java @@ -39,7 +39,7 @@ public interface Operator extends Releasable { * non-trivial overhead and it's just not worth building even * smaller blocks without under normal circumstances. */ - int MIN_TARGET_PAGE_SIZE = 10; + int MIN_TARGET_PAGE_SIZE = 32; /** * whether the given operator can accept more input pages
From e760999b03cd1daef5eea6a087cfce28b3dc8e70 Mon Sep 17 00:00:00 2001 From: Pooya Salehi Date: Mon, 15 Jan 2024 11:09:15 +0100 Subject: [PATCH 21/95] Consider LVM archive memory usage in IndexingMemoryController (#103979)
This is a follow-up to https://github.com/elastic/elasticsearch/pull/102752 to integrate the LVM archive memory usage in `IndexingMemoryController` and enforce a flush if necessary. Basically, the approach is to keep track of how much of the Archive's memory usage will be freed up by an ongoing Stateless refresh. Unlike the stateful approach to reclaiming version map memory, the stateless one requires propagating the commit to the unpromotables, and while this is happening we don't want to add the shard back to the IndexingMemoryController queue over and over again.
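As a rough illustration of the accounting (a hypothetical sketch, not the actual controller wiring; the two accessors do appear in the diff below):

```java
// Hypothetical sketch of the two numbers the controller cares about; not real Elasticsearch code.
// "Reclaimable" bytes still need a new commit plus a new unpromotable refresh to be freed,
// while "refreshing" bytes are already being freed by an in-flight unpromotable refresh,
// so counting them again would re-queue the shard over and over until that refresh finishes.
record VersionMapMemory(long reclaimableBytes, long refreshingBytes) {}

static VersionMapMemory versionMapMemory(LiveVersionMap versionMap) {
    return new VersionMapMemory(
        versionMap.reclaimableRefreshRamBytes(), // current + old maps + reclaimable archive bytes
        versionMap.getRefreshingBytes()          // archive bytes tied to the ongoing refresh
    );
}
```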
Requires https://github.com/elastic/elasticsearch/pull/104122
Relates ES-5921
--- .../index/engine/InternalEngine.java | 2 +- .../index/engine/LiveVersionMap.java | 26 ++++++++++++------- .../index/engine/LiveVersionMapArchive.java | 17 ++++++++++-- .../index/engine/LiveVersionMapTestUtils.java | 8 ++++++ 4 files changed, 41 insertions(+), 12 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 65834a8c011f2..85b9417816e0f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2111,7 +2111,7 @@ public void writeIndexingBuffer() throws IOException { } } - private void reclaimVersionMapMemory() { + protected void reclaimVersionMapMemory() { // If we're already halfway through the flush thresholds, then we do a flush. This will save us from writing segments twice // independently in a short period of time, once to reclaim version map memory and then to reclaim the translog. For // memory-constrained deployments that need to refresh often to reclaim memory, this may require flushing 2x more often than
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 7cc1b92b43c43..7ad0f21e331eb 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -476,19 +476,26 @@ public long ramBytesUsed() { } /** - * Returns how much RAM is used by refresh. This is the RAM usage of the current and old version maps. + * Returns how much RAM is used by refresh. This is the RAM usage of the current and old version maps, and the RAM usage of the + * archive, if any. */ long ramBytesUsedForRefresh() { - return maps.ramBytesUsed(); + return maps.ramBytesUsed() + archive.getMemoryBytesUsed(); } /** - * Returns how much RAM could be reclaimed from the version map. This is the RAM usage of the current version map, and could be - * reclaimed by refreshing. It doesn't include tombstones since they don't get cleared on refresh, nor the old version map that - * is being reclaimed. + * Returns how much RAM could be reclaimed from the version map. + *
<p>
+ * In stateful, this is the RAM usage of the current version map, and could be reclaimed by refreshing. It doesn't include tombstones + * since they don't get cleared on refresh, nor the old version map that is being reclaimed. + *
<p>
+ * In stateless, this is the RAM usage of current and old version map plus the RAM usage of the parts of the archive that require a + * new unpromotable refresh. To reclaim all three components we need to refresh AND flush. */ long reclaimableRefreshRamBytes() { - return maps.current.ramBytesUsed.get(); + return archive == LiveVersionMapArchive.NOOP_ARCHIVE ? maps.current.ramBytesUsed.get() : maps.ramBytesUsed() + archive.getReclaimableMemoryBytes(); } /** @@ -499,11 +506,12 @@ long ramBytesUsedForArchive() { } /** - * Returns how much RAM is current being freed up by refreshing. This is the RAM usage of the previous version map that needs to stay - * around until operations are safely recorded in the Lucene index. + * Returns how much RAM is currently being freed up by refreshing. In Stateful, this is the RAM usage of the previous version map that + * needs to stay around until operations are safely recorded in the Lucene index. In Stateless, this is the RAM usage of a fraction + * of the Archive entries that are kept around until an ongoing unpromotable refresh is finished. */ long getRefreshingBytes() { - return maps.old.ramBytesUsed.get(); + return archive == LiveVersionMapArchive.NOOP_ARCHIVE ? maps.old.ramBytesUsed.get() : archive.getRefreshingMemoryBytes(); } /**
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java index 9ccbf6ac16fed..d3361689bd3db 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMapArchive.java @@ -40,13 +40,26 @@ default boolean isUnsafe() { } /** - * Returns how much memory is currently being used by the archive and would be freed up after - * unpromotables are refreshed. + * Returns the total memory usage of the Archive. */ default long getMemoryBytesUsed() { return 0L; } + /** + * Returns how much memory could be freed up by creating a new commit and issuing a new unpromotable refresh. + */ + default long getReclaimableMemoryBytes() { + return 0; + } + + /** + * Returns how much memory will be freed once the current ongoing unpromotable refresh is finished.
+ */ + default long getRefreshingMemoryBytes() { + return 0; + } + LiveVersionMapArchive NOOP_ARCHIVE = new LiveVersionMapArchive() { @Override public void afterRefresh(LiveVersionMap.VersionLookup old) {}
diff --git a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java index 3185769bdab82..4d7ea709d565c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LiveVersionMapTestUtils.java @@ -61,6 +61,14 @@ public static void pruneTombstones(LiveVersionMap map, long maxTimestampToPrune, map.pruneTombstones(maxTimestampToPrune, maxSeqNoToPrune); } + public static long reclaimableRefreshRamBytes(LiveVersionMap map) { + return map.reclaimableRefreshRamBytes(); + } + + public static long refreshingBytes(LiveVersionMap map) { + return map.getRefreshingBytes(); + } + public static IndexVersionValue randomIndexVersionValue() { return new IndexVersionValue(randomTranslogLocation(), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong()); }
From 8582a18817e745eb19609387d753959399c66263 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Mon, 15 Jan 2024 11:18:12 +0100 Subject: [PATCH 22/95] [Profiling] Extract properties faster from source (#104356)
Previously, the extraction code for reading stacktrace and executable properties used an implementation of `ObjectPath#eval()` that split the provided object path using a regex. As the object path is constant and known in advance, we can directly provide the final object path, avoiding this costly step.
--- docs/changelog/104356.yaml | 5 +++++ .../java/org/elasticsearch/xpack/profiling/StackTrace.java | 7 +++++-- .../xpack/profiling/TransportGetStackTracesAction.java | 3 ++- 3 files changed, 12 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/104356.yaml
diff --git a/docs/changelog/104356.yaml b/docs/changelog/104356.yaml new file mode 100644 index 0000000000000..e0cb2311fbfc9 --- /dev/null +++ b/docs/changelog/104356.yaml @@ -0,0 +1,5 @@ +pr: 104356 +summary: "[Profiling] Extract properties faster from source" +area: Application +type: enhancement +issues: []
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java index 1cbc724132d10..b417e267f12da 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackTrace.java @@ -20,6 +20,9 @@ import java.util.function.Consumer; final class StackTrace implements ToXContentObject { + private static final String[] PATH_FRAME_IDS = new String[] { "Stacktrace", "frame", "ids" }; + private static final String[] PATH_FRAME_TYPES = new String[] { "Stacktrace", "frame", "types" }; + static final int NATIVE_FRAME_TYPE = 3; static final int KERNEL_FRAME_TYPE = 4; List addressOrLines; @@ -188,8 +191,8 @@ static String getFileIDFromStackFrameID(String frameID) { } public static StackTrace fromSource(Map source) { - String inputFrameIDs = ObjectPath.eval("Stacktrace.frame.ids", source); - String inputFrameTypes = ObjectPath.eval("Stacktrace.frame.types", source); + String inputFrameIDs = ObjectPath.eval(PATH_FRAME_IDS, source); + String inputFrameTypes = ObjectPath.eval(PATH_FRAME_TYPES, source); int countsFrameIDs =
inputFrameIDs.length() / BASE64_FRAME_ID_LENGTH; List fileIDs = new ArrayList<>(countsFrameIDs); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 9b5eaeb4aa6fd..ff9ad2e4f2e38 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -683,6 +683,7 @@ private void retrieveStackTraceDetails( * Collects stack trace details which are retrieved concurrently and sends a response only when all details are known. */ private static class DetailsHandler { + private static final String[] PATH_FILE_NAME = new String[] { "Executable", "file", "name" }; private final GetStackTracesResponseBuilder builder; private final ActionListener submitListener; private final Map executables; @@ -740,7 +741,7 @@ public void onExecutableDetailsResponse(MultiGetResponse multiGetItemResponses) if (executable.getResponse().isExists()) { // Duplicates are expected as we query multiple indices - do a quick pre-check before we deserialize a response if (executables.containsKey(executable.getId()) == false) { - String fileName = ObjectPath.eval("Executable.file.name", executable.getResponse().getSource()); + String fileName = ObjectPath.eval(PATH_FILE_NAME, executable.getResponse().getSource()); if (fileName != null) { executables.putIfAbsent(executable.getId(), fileName); } else { From f9398ed506633e4bdc46efd184fb5cc5457479b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20Zolt=C3=A1n=20Szab=C3=B3?= Date: Mon, 15 Jan 2024 11:21:22 +0100 Subject: [PATCH 23/95] [DOCS] Adds Hugging Face service to the inference API docs (#104190) --- .../inference/put-inference.asciidoc | 71 ++++++++++++++++++- 1 file changed, 69 insertions(+), 2 deletions(-) diff --git a/docs/reference/inference/put-inference.asciidoc b/docs/reference/inference/put-inference.asciidoc index 5d517d313b9ea..1d097c91bbedf 100644 --- a/docs/reference/inference/put-inference.asciidoc +++ b/docs/reference/inference/put-inference.asciidoc @@ -25,6 +25,7 @@ own model, use the <>. * Requires the `manage` <>. + [discrete] [[put-inference-api-desc]] ==== {api-description-title} @@ -33,10 +34,12 @@ The create {infer} API enables you to create and configure an {infer} model to perform a specific {infer} task. The following services are available through the {infer} API: + * ELSER * OpenAI * Hugging Face + [discrete] [[put-inference-api-path-params]] ==== {api-path-parms-title} @@ -63,7 +66,8 @@ The type of service supported for the specified task type. Available services: * `elser`: specify the `sparse_embedding` task type to use the ELSER service. * `openai`: specify the `text_embedding` task type to use the OpenAI service. -* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face service. +* `hugging_face`: specify the `text_embedding` task type to use the Hugging Face +service. `service_settings`:: (Required, object) @@ -108,6 +112,26 @@ https://platform.openai.com/account/organization[**Settings** > **Organizations* The URL endpoint to use for the requests. Can be changed for testing purposes. Defaults to `https://api.openai.com/v1/embeddings`. 
=====
+
+.`service_settings` for `hugging_face`
+[%collapsible%closed]
+=====
+`api_key`:::
+(Required, string)
+A valid access token for your Hugging Face account. You can find your Hugging
+Face access tokens or create a new one
+https://huggingface.co/settings/tokens[on the settings page].
+
+IMPORTANT: You need to provide the API key only once, during the {infer} model
+creation. The <> does not retrieve your API key. After
+creating the {infer} model, you cannot change the associated API key. If you
+want to use a different API key, delete the {infer} model and recreate it with
+the same name and the updated API key.
+
+`url`:::
+(Required, string)
+The URL endpoint to use for the requests.
+=====

`task_settings`::
(Optional, object)
@@ -124,10 +148,18 @@ https://platform.openai.com/docs/guides/embeddings/what-are-embeddings[OpenAI do
for the list of available text embedding models.
=====

+
[discrete]
[[put-inference-api-example]]
==== {api-examples-title}

+This section contains example API calls for every service type.
+
+
+[discrete]
+[[inference-example-elser]]
+===== ELSER service
+
The following example shows how to create an {infer} model called
`my-elser-model` to perform a `sparse_embedding` task type.

@@ -164,6 +196,10 @@ Example response:

// NOTCONSOLE

+[discrete]
+[[inference-example-openai]]
+===== OpenAI service
+
The following example shows how to create an {infer} model called
`openai_embeddings` to perform a `text_embedding` task type.

@@ -180,4 +216,35 @@ PUT _inference/text_embedding/openai_embeddings
}
}
------------------------------------------------------------
-// TEST[skip:TBD]
\ No newline at end of file
+// TEST[skip:TBD]
+
+
+[discrete]
+[[inference-example-hugging-face]]
+===== Hugging Face service
+
+The following example shows how to create an {infer} model called
+`hugging-face-embeddings` to perform a `text_embedding` task type.
+
+[source,console]
+------------------------------------------------------------
+PUT _inference/text_embedding/hugging-face-embeddings
+{
+    "service": "hugging_face",
+    "service_settings": {
+        "api_key": "", <1>
+        "url": "" <2>
+    }
+}
+------------------------------------------------------------
+// TEST[skip:TBD]
+<1> A valid Hugging Face access token. You can find it on the
+https://huggingface.co/settings/tokens[settings page of your account].
+<2> The {infer} endpoint URL you created on Hugging Face.
+
+Create a new {infer} endpoint on
+https://ui.endpoints.huggingface.co/[the Hugging Face endpoint page] to get an
+endpoint URL. Select the model you want to use on the new endpoint creation page
+- for example `intfloat/e5-small-v2` - then select the `Sentence Embeddings`
+task under the Advanced configuration section. Create the endpoint. Copy the URL
+after the endpoint initialization has finished.
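
As a usage sketch (an editorial illustration, not part of this patch): once the endpoint above is created, it could be exercised with a request along these lines. The perform {infer} request shape, in particular the `input` field, is assumed here rather than taken from this change.

[source,console]
------------------------------------------------------------
// Hypothetical request: the perform {infer} API is not touched by this patch
POST _inference/text_embedding/hugging-face-embeddings
{
  "input": "The sky above the port was the color of television tuned to a dead channel."
}
------------------------------------------------------------
// TEST[skip:TBD]

If accepted, the response should contain the text embedding that the configured Hugging Face endpoint computed for the given input.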
\ No newline at end of file From ad28dc9a6ca525e4f6cd2a9bf72c177cd4063ce2 Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Mon, 15 Jan 2024 11:58:57 +0100 Subject: [PATCH 24/95] ESQL: Add TO_UPPER and TO_LOWER functions (#104309) --- docs/changelog/104309.yaml | 5 + .../esql/functions/signature/to_lower.svg | 1 + .../esql/functions/signature/to_upper.svg | 1 + .../esql/functions/string-functions.asciidoc | 4 + .../esql/functions/to_lower.asciidoc | 20 +++ .../esql/functions/to_upper.asciidoc | 20 +++ .../esql/functions/types/to_lower.asciidoc | 6 + .../esql/functions/types/to_upper.asciidoc | 6 + .../src/main/resources/show.csv-spec | 6 +- .../src/main/resources/string.csv-spec | 53 ++++++++ .../scalar/string/ToLowerEvaluator.java | 118 ++++++++++++++++++ .../scalar/string/ToUpperEvaluator.java | 118 ++++++++++++++++++ .../function/EsqlFunctionRegistry.java | 6 +- .../function/scalar/string/ToLower.java | 109 ++++++++++++++++ .../function/scalar/string/ToUpper.java | 109 ++++++++++++++++ .../xpack/esql/io/stream/PlanNamedTypes.java | 20 +++ .../function/scalar/string/ToLowerTests.java | 91 ++++++++++++++ .../function/scalar/string/ToUpperTests.java | 91 ++++++++++++++ 18 files changed, 782 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/104309.yaml create mode 100644 docs/reference/esql/functions/signature/to_lower.svg create mode 100644 docs/reference/esql/functions/signature/to_upper.svg create mode 100644 docs/reference/esql/functions/to_lower.asciidoc create mode 100644 docs/reference/esql/functions/to_upper.asciidoc create mode 100644 docs/reference/esql/functions/types/to_lower.asciidoc create mode 100644 docs/reference/esql/functions/types/to_upper.asciidoc create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java diff --git a/docs/changelog/104309.yaml b/docs/changelog/104309.yaml new file mode 100644 index 0000000000000..4467eb6722afc --- /dev/null +++ b/docs/changelog/104309.yaml @@ -0,0 +1,5 @@ +pr: 104309 +summary: "ESQL: Add TO_UPPER and TO_LOWER functions" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/signature/to_lower.svg b/docs/reference/esql/functions/signature/to_lower.svg new file mode 100644 index 0000000000000..8d49539f0a0c8 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_lower.svg @@ -0,0 +1 @@ +TO_LOWER(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_upper.svg b/docs/reference/esql/functions/signature/to_upper.svg new file mode 100644 index 0000000000000..f7cea528331a2 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_upper.svg @@ -0,0 +1 @@ +TO_UPPER(str) \ No newline at end of file diff --git a/docs/reference/esql/functions/string-functions.asciidoc 
b/docs/reference/esql/functions/string-functions.asciidoc index b209244b93297..e9fe04ce15761 100644 --- a/docs/reference/esql/functions/string-functions.asciidoc +++ b/docs/reference/esql/functions/string-functions.asciidoc @@ -17,6 +17,8 @@ * <> * <> * <> +* <> +* <> * <> // end::string_list[] @@ -29,4 +31,6 @@ include::right.asciidoc[] include::rtrim.asciidoc[] include::split.asciidoc[] include::substring.asciidoc[] +include::to_lower.asciidoc[] +include::to_upper.asciidoc[] include::trim.asciidoc[] diff --git a/docs/reference/esql/functions/to_lower.asciidoc b/docs/reference/esql/functions/to_lower.asciidoc new file mode 100644 index 0000000000000..3d55e39e7c1ca --- /dev/null +++ b/docs/reference/esql/functions/to_lower.asciidoc @@ -0,0 +1,20 @@ +[discrete] +[[esql-to_lower]] +=== `TO_LOWER` +[.text-center] +image::esql/functions/signature/to_lower.svg[Embedded,opts=inline] + +Returns a new string representing the input string converted to lower case + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=to_lower] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=to_lower-result] +|=== + +Supported types: + +include::types/to_lower.asciidoc[] diff --git a/docs/reference/esql/functions/to_upper.asciidoc b/docs/reference/esql/functions/to_upper.asciidoc new file mode 100644 index 0000000000000..b451cc53d35e8 --- /dev/null +++ b/docs/reference/esql/functions/to_upper.asciidoc @@ -0,0 +1,20 @@ +[discrete] +[[esql-to_upper]] +=== `TO_UPPER` +[.text-center] +image::esql/functions/signature/to_upper.svg[Embedded,opts=inline] + +Returns a new string representing the input string converted to upper case + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=to_upper] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=to_upper-result] +|=== + +Supported types: + +include::types/to_upper.asciidoc[] diff --git a/docs/reference/esql/functions/types/to_lower.asciidoc b/docs/reference/esql/functions/types/to_lower.asciidoc new file mode 100644 index 0000000000000..26f4e7633d8ae --- /dev/null +++ b/docs/reference/esql/functions/types/to_lower.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +str | result +keyword | keyword +text | text +|=== diff --git a/docs/reference/esql/functions/types/to_upper.asciidoc b/docs/reference/esql/functions/types/to_upper.asciidoc new file mode 100644 index 0000000000000..26f4e7633d8ae --- /dev/null +++ b/docs/reference/esql/functions/types/to_upper.asciidoc @@ -0,0 +1,6 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +str | result +keyword | keyword +text | text +|=== diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 891b40cc95e50..f3cb362c40e22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -85,12 +85,14 @@ to_int |"integer to_int(v:boolean|date|keyword|text|double|lon to_integer |"integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | "Converts an input value to an integer value." |false |false | false to_ip |"ip to_ip(v:ip|keyword|text)" |v |"ip|keyword|text" | |ip | "Converts an input string to an IP value." 
|false |false | false to_long |"long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |long | "Converts an input value to a long value." |false |false | false +to_lower |"keyword|text to_lower(str:keyword|text)" |str |"keyword|text" | "The input string" |"keyword|text" | "Returns a new string representing the input string converted to lower case." |false |false | false to_radians |"double to_radians(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | |double | "Converts a number in degrees to radians." |false |false | false to_str |"keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false to_string |"keyword to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false to_ul |"unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_ulong |"unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_unsigned_long |"unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false +to_upper |"keyword|text to_upper(str:keyword|text)" |str |"keyword|text" | "The input string" |"keyword|text" | "Returns a new string representing the input string converted to upper case." |false |false | false to_ver |"version to_ver(v:keyword|text|version)" |v |"keyword|text|version" | |version | "Converts an input string to a version value." |false |false | false to_version |"version to_version(v:keyword|text|version)" |v |"keyword|text|version" | |version | "Converts an input string to a version value." |false |false | false trim |"keyword|text trim(str:keyword|text)" |str |"keyword|text" | "" |"keyword|text" | "Removes leading and trailing whitespaces from a string." 
| false | false | false @@ -176,12 +178,14 @@ double tau() "integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "ip to_ip(v:ip|keyword|text)" "long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"keyword|text to_lower(str:keyword|text)" "double to_radians(v:double|integer|long|unsigned_long)" "keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" "keyword to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" "unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" +"keyword|text to_upper(str:keyword|text)" "version to_ver(v:keyword|text|version)" "version to_version(v:keyword|text|version)" "keyword|text trim(str:keyword|text)" @@ -208,5 +212,5 @@ countFunctions#[skip:-8.12.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -84 | 84 | 84 +86 | 86 | 86 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index b8b80df389f9c..bdbcfb3cb49e9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -952,3 +952,56 @@ Bamford |true Bernatsky |false // end::endsWith-result[] ; + + + +toLowerRow#[skip:-8.12.99] +// tag::to_lower[] +ROW message = "Some Text" +| EVAL message_lower = TO_LOWER(message) +// end::to_lower[] +; + +// tag::to_lower-result[] +message:keyword | message_lower:keyword +Some Text | some text +// end::to_lower-result[] +; + + +toLower#[skip:-8.12.99] +from employees | sort emp_no | eval name_lower = TO_LOWER(first_name) | keep emp_no, first_name, name_lower | limit 1; + +emp_no:integer | first_name:keyword | name_lower:keyword +10001 | Georgi | georgi +; + + +toUpperRow#[skip:-8.12.99] +// tag::to_upper[] +ROW message = "Some Text" +| EVAL message_upper = TO_UPPER(message) +// end::to_upper[] +; + +// tag::to_upper-result[] +message:keyword | message_upper:keyword +Some Text | SOME TEXT +// end::to_upper-result[] +; + + +toUpper#[skip:-8.12.99] +from employees | sort emp_no | eval name_upper = TO_UPPER(first_name) | keep emp_no, first_name, name_upper | limit 1; + +emp_no:integer | first_name:keyword | name_upper:keyword +10001 | Georgi | GEORGI +; + + +toUpperLowerUnicode#[skip:-8.12.99] +row a = "π/2 + a + B + Λ ºC" | eval lower = to_lower(a), upper = to_upper(a) | keep a, upper, lower; + +a:keyword | upper:keyword | lower:keyword +π/2 + a + B + Λ ºC | Π/2 + A + B + Λ ºC | π/2 + a + b + λ ºc +; diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerEvaluator.java new file mode 100644 index 0000000000000..23f28385916c7 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerEvaluator.java @@ -0,0 +1,118 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.Locale; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToLower}. + * This class is generated. Do not edit it. + */ +public final class ToLowerEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator val; + + private final Locale locale; + + private final DriverContext driverContext; + + public ToLowerEvaluator(Source source, EvalOperator.ExpressionEvaluator val, Locale locale, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.val = val; + this.locale = locale; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { + BytesRefVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock); + } + return eval(page.getPositionCount(), valVector).asBlock(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock valBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBytesRef(ToLower.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch), locale)); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, BytesRefVector valVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(ToLower.process(valVector.getBytesRef(p, valScratch), locale)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "ToLowerEvaluator[" + "val=" + val + ", locale=" + locale + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + private final Locale locale; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, Locale locale) { + this.source = source; + this.val = val; + this.locale = locale; + } + + @Override + public 
ToLowerEvaluator get(DriverContext context) { + return new ToLowerEvaluator(source, val.get(context), locale, context); + } + + @Override + public String toString() { + return "ToLowerEvaluator[" + "val=" + val + ", locale=" + locale + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperEvaluator.java new file mode 100644 index 0000000000000..5c3e86184d460 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperEvaluator.java @@ -0,0 +1,118 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import java.util.Locale; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToUpper}. + * This class is generated. Do not edit it. 
+ */ +public final class ToUpperEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator val; + + private final Locale locale; + + private final DriverContext driverContext; + + public ToUpperEvaluator(Source source, EvalOperator.ExpressionEvaluator val, Locale locale, + DriverContext driverContext) { + this.warnings = new Warnings(source); + this.val = val; + this.locale = locale; + this.driverContext = driverContext; + } + + @Override + public Block eval(Page page) { + try (BytesRefBlock valBlock = (BytesRefBlock) val.eval(page)) { + BytesRefVector valVector = valBlock.asVector(); + if (valVector == null) { + return eval(page.getPositionCount(), valBlock); + } + return eval(page.getPositionCount(), valVector).asBlock(); + } + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock valBlock) { + try(BytesRefBlock.Builder result = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (valBlock.isNull(p)) { + result.appendNull(); + continue position; + } + if (valBlock.getValueCount(p) != 1) { + if (valBlock.getValueCount(p) > 1) { + warnings.registerException(new IllegalArgumentException("single-value function encountered multi-value")); + } + result.appendNull(); + continue position; + } + result.appendBytesRef(ToUpper.process(valBlock.getBytesRef(valBlock.getFirstValueIndex(p), valScratch), locale)); + } + return result.build(); + } + } + + public BytesRefVector eval(int positionCount, BytesRefVector valVector) { + try(BytesRefVector.Builder result = driverContext.blockFactory().newBytesRefVectorBuilder(positionCount)) { + BytesRef valScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(ToUpper.process(valVector.getBytesRef(p, valScratch), locale)); + } + return result.build(); + } + } + + @Override + public String toString() { + return "ToUpperEvaluator[" + "val=" + val + ", locale=" + locale + "]"; + } + + @Override + public void close() { + Releasables.closeExpectNoException(val); + } + + static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory val; + + private final Locale locale; + + public Factory(Source source, EvalOperator.ExpressionEvaluator.Factory val, Locale locale) { + this.source = source; + this.val = val; + this.locale = locale; + } + + @Override + public ToUpperEvaluator get(DriverContext context) { + return new ToUpperEvaluator(source, val.get(context), locale, context); + } + + @Override + public String toString() { + return "ToUpperEvaluator[" + "val=" + val + ", locale=" + locale + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 4ee6a346e2a87..f8d9bfbc160a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -82,6 +82,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; import 
org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Trim; import org.elasticsearch.xpack.esql.plan.logical.show.ShowFunctions; import org.elasticsearch.xpack.ql.expression.function.FunctionDefinition; @@ -154,7 +156,9 @@ private FunctionDefinition[][] functions() { def(Replace.class, Replace::new, "replace"), def(Right.class, Right::new, "right"), def(StartsWith.class, StartsWith::new, "starts_with"), - def(EndsWith.class, EndsWith::new, "ends_with") }, + def(EndsWith.class, EndsWith::new, "ends_with"), + def(ToLower.class, ToLower::new, "to_lower"), + def(ToUpper.class, ToUpper::new, "to_upper") }, // date new FunctionDefinition[] { def(DateDiff.class, DateDiff::new, "date_diff"), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java new file mode 100644 index 0000000000000..c79a5f3e051ec --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLower.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.function.scalar.ConfigurationFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.session.Configuration; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; + +public class ToLower extends ConfigurationFunction implements EvaluatorMapper { + + private final Expression field; + + @FunctionInfo( + returnType = { "keyword", "text" }, + description = "Returns a new string representing the input string converted to lower case." 
+ ) + public ToLower( + Source source, + @Param(name = "str", type = { "keyword", "text" }, description = "The input string") Expression field, + Configuration configuration + ) { + super(source, List.of(field), configuration); + this.field = field; + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + return isString(field, sourceText(), DEFAULT); + } + + @Override + public boolean foldable() { + return field.foldable(); + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Evaluator + static BytesRef process(BytesRef val, @Fixed Locale locale) { + return BytesRefs.toBytesRef(val.utf8ToString().toLowerCase(locale)); + } + + @Override + public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { + var fieldEvaluator = toEvaluator.apply(field); + return new ToLowerEvaluator.Factory(source(), fieldEvaluator, ((EsqlConfiguration) configuration()).locale()); + } + + public Expression field() { + return field; + } + + public ToLower replaceChild(Expression child) { + return new ToLower(source(), child, configuration()); + } + + @Override + public Expression replaceChildren(List newChildren) { + assert newChildren.size() == 1; + return replaceChild(newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToLower::new, field, configuration()); + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java new file mode 100644 index 0000000000000..7fc54947c0ce8 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpper.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.function.scalar.ConfigurationFunction; +import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate; +import org.elasticsearch.xpack.ql.session.Configuration; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString; + +public class ToUpper extends ConfigurationFunction implements EvaluatorMapper { + + private final Expression field; + + @FunctionInfo( + returnType = { "keyword", "text" }, + description = "Returns a new string representing the input string converted to upper case." + ) + public ToUpper( + Source source, + @Param(name = "str", type = { "keyword", "text" }, description = "The input string") Expression field, + Configuration configuration + ) { + super(source, List.of(field), configuration); + this.field = field; + } + + @Override + public DataType dataType() { + return field.dataType(); + } + + @Override + protected TypeResolution resolveType() { + if (childrenResolved() == false) { + return new TypeResolution("Unresolved children"); + } + + return isString(field, sourceText(), DEFAULT); + } + + @Override + public boolean foldable() { + return field.foldable(); + } + + @Override + public Object fold() { + return EvaluatorMapper.super.fold(); + } + + @Evaluator + static BytesRef process(BytesRef val, @Fixed Locale locale) { + return BytesRefs.toBytesRef(val.utf8ToString().toUpperCase(locale)); + } + + @Override + public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { + var fieldEvaluator = toEvaluator.apply(field); + return new ToUpperEvaluator.Factory(source(), fieldEvaluator, ((EsqlConfiguration) configuration()).locale()); + } + + public Expression field() { + return field; + } + + public ToUpper replaceChild(Expression child) { + return new ToUpper(source(), child, configuration()); + } + + @Override + public Expression replaceChildren(List newChildren) { + assert newChildren.size() == 1; + return replaceChild(newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToUpper::new, field, configuration()); + } + + @Override + public ScriptTemplate asScript() { + throw new UnsupportedOperationException("functions do not support scripting"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 6031d1b06ebfd..b6dce816db218 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -102,6 +102,8 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToLower; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.ToUpper; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Trim; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; @@ -367,6 +369,8 @@ public static List namedTypeEntries() { of(ScalarFunction.class, Split.class, PlanNamedTypes::writeSplit, PlanNamedTypes::readSplit), of(ScalarFunction.class, Tau.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), of(ScalarFunction.class, Replace.class, PlanNamedTypes::writeReplace, PlanNamedTypes::readReplace), + of(ScalarFunction.class, ToLower.class, PlanNamedTypes::writeToLower, PlanNamedTypes::readToLower), + of(ScalarFunction.class, ToUpper.class, PlanNamedTypes::writeToUpper, PlanNamedTypes::readToUpper), // ArithmeticOperations of(ArithmeticOperation.class, Add.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation), of(ArithmeticOperation.class, Sub.class, PlanNamedTypes::writeArithmeticOperation, PlanNamedTypes::readArithmeticOperation), @@ -1450,6 +1454,22 @@ static void writeReplace(PlanStreamOutput out, Replace replace) throws IOExcepti out.writeExpression(fields.get(2)); } + static ToLower readToLower(PlanStreamInput in) throws IOException { + return new ToLower(Source.EMPTY, in.readExpression(), in.configuration()); + } + + static void writeToLower(PlanStreamOutput out, ToLower toLower) throws IOException { + out.writeExpression(toLower.field()); + } + + static ToUpper readToUpper(PlanStreamInput in) throws IOException { + return new ToUpper(Source.EMPTY, in.readExpression(), in.configuration()); + } + + static void writeToUpper(PlanStreamOutput out, ToUpper toUpper) throws IOException { + out.writeExpression(toUpper.field()); + } + static Left readLeft(PlanStreamInput in) throws IOException { return new Left(in.readSource(), in.readExpression(), in.readExpression()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java new file mode 100644 index 0000000000000..bd9205c930d51 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.type.DateUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class ToLowerTests extends AbstractFunctionTestCase { + public ToLowerTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + + suppliers.add(supplier("keyword ascii", DataTypes.KEYWORD, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("keyword unicode", DataTypes.KEYWORD, () -> randomUnicodeOfLengthBetween(1, 10))); + suppliers.add(supplier("text ascii", DataTypes.TEXT, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("text unicode", DataTypes.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); + + // add null as parameter + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + } + + public void testRandomLocale() { + String testString = randomAlphaOfLength(10); + EsqlConfiguration cfg = randomLocaleConfig(); + ToLower func = new ToLower(Source.EMPTY, new Literal(Source.EMPTY, testString, DataTypes.KEYWORD), cfg); + assertThat(BytesRefs.toBytesRef(testString.toLowerCase(cfg.locale())), equalTo(func.fold())); + } + + private EsqlConfiguration randomLocaleConfig() { + return new EsqlConfiguration( + DateUtils.UTC, + randomLocale(random()), + null, + null, + new QueryPragmas(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), + "", + false + ); + } + + @Override + protected Expression build(Source source, List args) { + return new ToLower(source, args.get(0), EsqlTestUtils.TEST_CFG); + } + + private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { + return new TestCaseSupplier(name, List.of(type), () -> { + List values = new ArrayList<>(); + String expectedToString = "ToLowerEvaluator[val=Attribute[channel=0], locale=en_US]"; + + String value = valueSupplier.get(); + values.add(new TestCaseSupplier.TypedData(new BytesRef(value), type, "0")); + + String expectedValue = value.toLowerCase(EsqlTestUtils.TEST_CFG.locale()); + return new TestCaseSupplier.TestCase(values, expectedToString, type, equalTo(new BytesRef(expectedValue))); + }); + } +} diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java new file mode 100644 index 0000000000000..ce7c011f201d8 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; +import org.elasticsearch.xpack.ql.type.DateUtils; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class ToUpperTests extends AbstractFunctionTestCase { + public ToUpperTests(@Name("TestCase") Supplier testCaseSupplier) { + this.testCase = testCaseSupplier.get(); + } + + @ParametersFactory + public static Iterable parameters() { + List suppliers = new ArrayList<>(); + + suppliers.add(supplier("keyword ascii", DataTypes.KEYWORD, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("keyword unicode", DataTypes.KEYWORD, () -> randomUnicodeOfLengthBetween(1, 10))); + suppliers.add(supplier("text ascii", DataTypes.TEXT, () -> randomAlphaOfLengthBetween(1, 10))); + suppliers.add(supplier("text unicode", DataTypes.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); + + // add null as parameter + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + } + + public void testRandomLocale() { + String testString = randomAlphaOfLength(10); + EsqlConfiguration cfg = randomLocaleConfig(); + ToUpper func = new ToUpper(Source.EMPTY, new Literal(Source.EMPTY, testString, DataTypes.KEYWORD), cfg); + assertThat(BytesRefs.toBytesRef(testString.toUpperCase(cfg.locale())), equalTo(func.fold())); + } + + private EsqlConfiguration randomLocaleConfig() { + return new EsqlConfiguration( + DateUtils.UTC, + randomLocale(random()), + null, + null, + new QueryPragmas(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_MAX_SIZE.getDefault(Settings.EMPTY), + EsqlPlugin.QUERY_RESULT_TRUNCATION_DEFAULT_SIZE.getDefault(Settings.EMPTY), + "", + false + ); + } + + @Override + protected Expression build(Source source, List args) { + return new ToUpper(source, args.get(0), 
EsqlTestUtils.TEST_CFG); + } + + private static TestCaseSupplier supplier(String name, DataType type, Supplier valueSupplier) { + return new TestCaseSupplier(name, List.of(type), () -> { + List values = new ArrayList<>(); + String expectedToString = "ToUpperEvaluator[val=Attribute[channel=0], locale=en_US]"; + + String value = valueSupplier.get(); + values.add(new TestCaseSupplier.TypedData(new BytesRef(value), type, "0")); + + String expectedValue = value.toUpperCase(EsqlTestUtils.TEST_CFG.locale()); + return new TestCaseSupplier.TestCase(values, expectedToString, type, equalTo(new BytesRef(expectedValue))); + }); + } +} From d049273ce1a01f437dbcc7b4b1680ea55479c02c Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 15 Jan 2024 12:31:03 +0000 Subject: [PATCH 25/95] Fix deleting index during snapshot finalization (#103817) Today if an index is deleted during a very specific order of snapshot finalizations then it's possible we'll miscalculate the latest shard generations for the shards in that index, causing the deletion of a shard-level `index-UUID` blob which prevents further snapshots of that shard. Closes #101029 --- docs/changelog/103817.yaml | 6 + .../snapshots/ConcurrentSnapshotsIT.java | 110 ++++++++++++++++++ .../repositories/FinalizeSnapshotContext.java | 2 +- .../repositories/ShardGenerations.java | 5 + .../blobstore/BlobStoreRepository.java | 1 + .../snapshots/SnapshotsService.java | 58 +++++++-- 6 files changed, 169 insertions(+), 13 deletions(-) create mode 100644 docs/changelog/103817.yaml diff --git a/docs/changelog/103817.yaml b/docs/changelog/103817.yaml new file mode 100644 index 0000000000000..ff8978f1d3776 --- /dev/null +++ b/docs/changelog/103817.yaml @@ -0,0 +1,6 @@ +pr: 103817 +summary: Fix deleting index during snapshot finalization +area: Snapshot/Restore +type: bug +issues: + - 101029 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 8d2e15f5027d5..1152cf5f03e5a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -17,17 +17,24 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; +import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException; import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.discovery.AbstractDisruptionTestCase; +import org.elasticsearch.index.Index; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoryConflictException; @@ -36,6 +43,7 @@ import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.snapshots.mockstore.MockRepository; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.transport.MockTransportService; @@ -48,10 +56,12 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; @@ -2060,6 +2070,106 @@ public void testQueuedSnapshotAfterPartialWithIndexRecreate() throws Exception { assertSuccessful(partialFuture); } + public void testDeleteIndexWithOutOfOrderFinalization() { + + final var indexToDelete = "index-to-delete"; + final var indexNames = List.of(indexToDelete, "index-0", "index-1", "index-2"); + + for (final var indexName : indexNames) { + assertAcked(prepareCreate(indexName, indexSettingsNoReplicas(1))); + } + + final var repoName = "test-repo"; + createRepository(repoName, "fs"); + + // block the update-shard-snapshot-status requests so we can execute them in a specific order + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + final Map> otherIndexSnapshotListeners = indexNames.stream() + .collect(Collectors.toMap(k -> k, k -> new SubscribableListener<>())); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> { + final var indexName = request.shardId().getIndexName(); + if (indexName.equals(indexToDelete)) { + handler.messageReceived(request, channel, task); + } else { + final var listener = otherIndexSnapshotListeners.get(indexName); + assertNotNull(indexName, listener); + listener.addListener( + ActionTestUtils.assertNoFailureListener(ignored -> handler.messageReceived(request, channel, task)) + ); + } + } + ); + + // start the snapshots, each targeting index-to-delete and one other index so we can control their finalization order + final var snapshotCompleters = new HashMap(); + for (final var blockingIndex : List.of("index-0", "index-1", "index-2")) { + final var snapshotName = "snapshot-with-" + blockingIndex; + final var snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .setPartial(true) + .setIndices(indexToDelete, blockingIndex) + .execute(); + + // ensure each snapshot has really started before moving on to the next one + safeAwait( + ClusterServiceUtils.addTemporaryStateListener( + internalCluster().getInstance(ClusterService.class), + cs -> SnapshotsInProgress.get(cs) + .forRepo(repoName) + .stream() + .anyMatch(e -> e.snapshot().getSnapshotId().getName().equals(snapshotName)) + ) + ); + + snapshotCompleters.put(blockingIndex, () -> { + assertFalse(snapshotFuture.isDone()); + 
otherIndexSnapshotListeners.get(blockingIndex).onResponse(null); + assertEquals(SnapshotState.SUCCESS, snapshotFuture.actionGet(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + }); + } + + // set up to delete the index at a very specific moment during finalization + final var masterDeleteIndexService = internalCluster().getCurrentMasterNodeInstance(MetadataDeleteIndexService.class); + final var indexRecreatedListener = ClusterServiceUtils + // wait until the snapshot has entered finalization + .addTemporaryStateListener( + internalCluster().getInstance(ClusterService.class), + cs -> SnapshotsInProgress.get(cs) + .forRepo(repoName) + .stream() + .anyMatch(e -> e.snapshot().getSnapshotId().getName().equals("snapshot-with-index-1") && e.state().completed()) + ) + // execute the index deletion _directly on the master_ so it happens before the snapshot finalization executes + .andThen((l, ignored) -> masterDeleteIndexService.deleteIndices(new DeleteIndexClusterStateUpdateRequest(l.map(r -> { + assertTrue(r.isAcknowledged()); + return null; + })).indices(new Index[] { internalCluster().clusterService().state().metadata().index(indexToDelete).getIndex() }) + .ackTimeout(TimeValue.timeValueSeconds(10)) + .masterNodeTimeout(TimeValue.timeValueSeconds(10)))) + // ultimately create the index again so that taking a full snapshot will pick up any missing shard gen blob, and deleting that + // full snapshot will clean up all dangling shard-level blobs + .andThen((l, ignored) -> prepareCreate(indexToDelete, indexSettingsNoReplicas(1)).execute(l.map(r -> { + assertTrue(r.isAcknowledged()); + return null; + }))); + + // release the snapshots to be finalized, in this order + for (final var blockingIndex : List.of("index-1", "index-2", "index-0")) { + snapshotCompleters.get(blockingIndex).run(); + } + + safeAwait(indexRecreatedListener); + masterTransportService.clearAllRules(); + + // create a full snapshot to verify that the repo is still ok + createFullSnapshot(repoName, "final-full-snapshot"); + + // delete the full snapshot to clean up the leftover shard-level metadata (which trips repo consistency assertions otherwise) + startDeleteSnapshot(repoName, "final-full-snapshot").actionGet(10, TimeUnit.SECONDS); + } + private static void assertSnapshotStatusCountOnRepo(String otherBlockedRepoName, int count) { final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(otherBlockedRepoName).get(); final List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); diff --git a/server/src/main/java/org/elasticsearch/repositories/FinalizeSnapshotContext.java b/server/src/main/java/org/elasticsearch/repositories/FinalizeSnapshotContext.java index 9129f6abd373c..b459e1cfc7338 100644 --- a/server/src/main/java/org/elasticsearch/repositories/FinalizeSnapshotContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/FinalizeSnapshotContext.java @@ -100,7 +100,7 @@ public Map> obsoleteShardGenerations() { } public ClusterState updatedClusterState(ClusterState state) { - final ClusterState updatedState = SnapshotsService.stateWithoutSnapshot(state, snapshotInfo.snapshot()); + final ClusterState updatedState = SnapshotsService.stateWithoutSnapshot(state, snapshotInfo.snapshot(), updatedShardGenerations); obsoleteGenerations.set( SnapshotsInProgress.get(updatedState).obsoleteGenerations(snapshotInfo.repository(), SnapshotsInProgress.get(state)) ); diff --git a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java 
b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java index e42552d3e5f3c..4c34f2e192a26 100644 --- a/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java +++ b/server/src/main/java/org/elasticsearch/repositories/ShardGenerations.java @@ -142,6 +142,11 @@ public ShardGeneration getShardGen(IndexId indexId, int shardId) { return generations.get(shardId); } + public boolean hasShardGen(RepositoryShardId repositoryShardId) { + final var indexShardGens = getGens(repositoryShardId.index()); + return repositoryShardId.shardId() < indexShardGens.size() && indexShardGens.get(repositoryShardId.shardId()) != null; + } + public List getGens(IndexId indexId) { return shardGenerations.getOrDefault(indexId, Collections.emptyList()); } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 48caafc6bfab8..b8b0498d95125 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -1741,6 +1741,7 @@ private void cleanupOldMetadata( (indexId, gens) -> gens.forEach( (shardId, oldGen) -> toDelete.add( shardPath(indexId, shardId).buildAsString().substring(prefixPathLen) + INDEX_FILE_PREFIX + oldGen + .toBlobNamePart() ) ) ); diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index f973d456a6b79..bbabfca866a69 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -450,7 +450,7 @@ private void startCloning(Repository repository, SnapshotsInProgress.Entry clone endingSnapshots.add(targetSnapshot); initializingClones.remove(targetSnapshot); logger.info(() -> "Failed to start snapshot clone [" + cloneEntry + "]", e); - removeFailedSnapshotFromClusterState(targetSnapshot, e, null); + removeFailedSnapshotFromClusterState(targetSnapshot, e, null, ShardGenerations.EMPTY); }; // 1. step, load SnapshotInfo to make sure that source snapshot was successful for the indices we want to clone @@ -1312,7 +1312,12 @@ private void endSnapshot(SnapshotsInProgress.Entry entry, Metadata metadata, @Nu if (entry.isClone() && entry.state() == State.FAILED) { logger.debug("Removing failed snapshot clone [{}] from cluster state", entry); if (newFinalization) { - removeFailedSnapshotFromClusterState(snapshot, new SnapshotException(snapshot, entry.failure()), null); + removeFailedSnapshotFromClusterState( + snapshot, + new SnapshotException(snapshot, entry.failure()), + null, + ShardGenerations.EMPTY + ); } return; } @@ -1496,7 +1501,15 @@ private void finalizeSnapshotEntry(Snapshot snapshot, Metadata metadata, Reposit // a fatal like e.g. 
this node stopped being the master node snapshotListeners.onResponse(endAndGetListenersToResolve(snapshot)); runNextQueuedOperation(updatedRepositoryData, repository, true); - }, e -> handleFinalizationFailure(e, snapshot, repositoryData)), + }, + e -> handleFinalizationFailure( + e, + snapshot, + repositoryData, + // we might have written the new root blob before failing here, so we must use the updated shardGenerations + shardGenerations + ) + ), snInfo -> snapshotListeners.addListener(new ActionListener<>() { @Override public void onResponse(List> actionListeners) { @@ -1512,11 +1525,20 @@ public void onFailure(Exception e) { }) ) ); - }, e -> handleFinalizationFailure(e, snapshot, repositoryData))); + }, + e -> handleFinalizationFailure( + e, + snapshot, + repositoryData, + // a failure here means the root blob was not updated, but the updated shard generation blobs are all in place so we can + // use the updated shardGenerations for all pending shard snapshots + shardGenerations + ) + )); } catch (Exception e) { logger.error(Strings.format("unexpected failure finalizing %s", snapshot), e); assert false : new AssertionError("unexpected failure finalizing " + snapshot, e); - handleFinalizationFailure(e, snapshot, repositoryData); + handleFinalizationFailure(e, snapshot, repositoryData, ShardGenerations.EMPTY); } } @@ -1568,7 +1590,12 @@ private List> endAndGetListenersToResolve(Snapshot * @param snapshot snapshot that failed to finalize * @param repositoryData current repository data for the snapshot's repository */ - private void handleFinalizationFailure(Exception e, Snapshot snapshot, RepositoryData repositoryData) { + private void handleFinalizationFailure( + Exception e, + Snapshot snapshot, + RepositoryData repositoryData, + ShardGenerations shardGenerations + ) { if (ExceptionsHelper.unwrap(e, NotMasterException.class, FailedToCommitClusterStateException.class) != null) { // Failure due to not being master any more, don't try to remove snapshot from cluster state the next master // will try ending this snapshot again @@ -1581,7 +1608,7 @@ private void handleFinalizationFailure(Exception e, Snapshot snapshot, Repositor failAllListenersOnMasterFailOver(e); } else { logger.warn(() -> "[" + snapshot + "] failed to finalize snapshot", e); - removeFailedSnapshotFromClusterState(snapshot, e, repositoryData); + removeFailedSnapshotFromClusterState(snapshot, e, repositoryData, shardGenerations); } } @@ -1701,7 +1728,7 @@ private static Tuple> read * @param snapshot snapshot for which to remove the snapshot operation * @return updated cluster state */ - public static ClusterState stateWithoutSnapshot(ClusterState state, Snapshot snapshot) { + public static ClusterState stateWithoutSnapshot(ClusterState state, Snapshot snapshot, ShardGenerations shardGenerations) { final SnapshotsInProgress snapshots = SnapshotsInProgress.get(state); ClusterState result = state; int indexOfEntry = -1; @@ -1762,7 +1789,8 @@ public static ClusterState stateWithoutSnapshot(ClusterState state, Snapshot sna final ShardSnapshotStatus shardState = finishedShardEntry.getValue(); final RepositoryShardId repositoryShardId = finishedShardEntry.getKey(); if (shardState.state() != ShardState.SUCCESS - || previousEntry.shardsByRepoShardId().containsKey(repositoryShardId) == false) { + || previousEntry.shardsByRepoShardId().containsKey(repositoryShardId) == false + || shardGenerations.hasShardGen(finishedShardEntry.getKey()) == false) { continue; } updatedShardAssignments = maybeAddUpdatedAssignment( @@ -1779,7 +1807,8 @@ 
public static ClusterState stateWithoutSnapshot(ClusterState state, Snapshot sna .entrySet()) { final ShardSnapshotStatus shardState = finishedShardEntry.getValue(); if (shardState.state() == ShardState.SUCCESS - && previousEntry.shardsByRepoShardId().containsKey(finishedShardEntry.getKey())) { + && previousEntry.shardsByRepoShardId().containsKey(finishedShardEntry.getKey()) + && shardGenerations.hasShardGen(finishedShardEntry.getKey())) { updatedShardAssignments = maybeAddUpdatedAssignment( updatedShardAssignments, shardState, @@ -1862,13 +1891,18 @@ private static ImmutableOpenMap.Builder maybeAddUpda * @param repositoryData repository data if the next finalization operation on the repository should be attempted or {@code null} if * no further actions should be executed */ - private void removeFailedSnapshotFromClusterState(Snapshot snapshot, Exception failure, @Nullable RepositoryData repositoryData) { + private void removeFailedSnapshotFromClusterState( + Snapshot snapshot, + Exception failure, + @Nullable RepositoryData repositoryData, + ShardGenerations shardGenerations + ) { assert failure != null : "Failure must be supplied"; submitUnbatchedTask(REMOVE_SNAPSHOT_METADATA_TASK_SOURCE, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - final ClusterState updatedState = stateWithoutSnapshot(currentState, snapshot); + final ClusterState updatedState = stateWithoutSnapshot(currentState, snapshot, shardGenerations); assert updatedState == currentState || endingSnapshots.contains(snapshot) : "did not track [" + snapshot + "] in ending snapshots while removing it from the cluster state"; // now check if there are any delete operations that refer to the just failed snapshot and remove the snapshot from them From 797fadceb7ce500acec9a8fc50c29dfe9c7c5456 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Mon, 15 Jan 2024 15:22:58 +0200 Subject: [PATCH 26/95] Use delta for comparing doubles in testEncodeDecodeNoSaturation (#104321) Fixes #103924 --- .../index/mapper/extras/ScaledFloatFieldMapperTests.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java index 13927962e5d58..d89e1aa6a9772 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapperTests.java @@ -463,7 +463,11 @@ public void testEncodeDecodeExactScalingFactor() { public void testEncodeDecodeNoSaturation() { double scalingFactor = randomValue(); double unsaturated = randomDoubleBetween(Long.MIN_VALUE / scalingFactor, Long.MAX_VALUE / scalingFactor, true); - assertThat(encodeDecode(unsaturated, scalingFactor), equalTo(Math.round(unsaturated * scalingFactor) / scalingFactor)); + assertEquals( + encodeDecode(unsaturated, scalingFactor), + Math.round(unsaturated * scalingFactor) / scalingFactor, + unsaturated * 1e-10 + ); } /** From 2b175653d9f70e65ea3dc2d2ac1f30c6bd79b2d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Mon, 15 Jan 2024 14:48:36 +0100 Subject: [PATCH 27/95] YAML test framework: separate `skip` and `requires` sections (#104140) * Introduce Prerequisites criteria (Predicate + factory) for modular skip 
decisions - Removed accessors to specific criteria from SkipSection (used only in tests), adjusted test assertions - Moved the Features check (YAML test runner features) to SkipSection build time * Separated the check for xpack/no_xpack The xpack check depends on cluster configuration (which modules are installed), while Features are meant to be "static" test-runner capabilities. We separate them so that checks on one (test-runner features) can run before, and independently of, the other. * Consolidated skip() methods - Divided require and skip predicates - Divided requires and skip parsing (distinct sections) - Renamed SkipSection to PrerequisiteSection and related methods/fields (e.g. skip -> evaluate) * Refactored tests - moved and added VersionRange tests - added specific version and os skip tests - modified parse/validate/build to make SkipSection more unit-testable * Added cluster feature-based skip criteria * Updated javadoc, renamed for clarity, and improved the skip reason message --- .../rest/yaml/CcsCommonYamlTestSuiteIT.java | 2 +- .../rest/yaml/ESClientYamlSuiteTestCase.java | 20 +- .../yaml/section/ClientYamlTestSection.java | 14 +- .../yaml/section/ClientYamlTestSuite.java | 53 +- .../yaml/section/PrerequisiteSection.java | 413 ++++++++++++ .../{SkipCriteria.java => Prerequisites.java} | 29 +- .../test/rest/yaml/section/SetupSection.java | 16 +- .../test/rest/yaml/section/SkipSection.java | 255 ------- .../rest/yaml/section/TeardownSection.java | 16 +- .../section/ClientYamlTestSectionTests.java | 10 +- .../section/ClientYamlTestSuiteTests.java | 208 ++++-- .../section/PrerequisiteSectionTests.java | 630 ++++++++++++++++++ .../rest/yaml/section/SetupSectionTests.java | 10 +- .../rest/yaml/section/SkipSectionTests.java | 311 --------- .../yaml/section/TeardownSectionTests.java | 6 +- .../qa/single_node/EsqlClientYamlAsyncIT.java | 2 +- .../test/CoreTestTranslater.java | 4 +- 17 files changed, 1280 insertions(+), 719 deletions(-) create mode 100644 test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java rename test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/{SkipCriteria.java => Prerequisites.java} (54%) delete mode 100644 test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java create mode 100644 test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java delete mode 100644 test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index 88740edffc09a..e709b838a26f3 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -259,7 +259,7 @@ static ClientYamlTestCandidate rewrite(ClientYamlTestCandidate clientYamlTestCan new ClientYamlTestSection( testSection.getLocation(), testSection.getName(), - testSection.getSkipSection(), + testSection.getPrerequisiteSection(), modifiedExecutableSections ) ); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index
049102f87a544..4be9481df58b1 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -446,20 +446,10 @@ public void test() throws IOException { } // skip test if the whole suite (yaml file) is disabled - assumeFalse( - testCandidate.getSetupSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext) - ); - // skip test if the whole suite (yaml file) is disabled - assumeFalse( - testCandidate.getTeardownSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext) - ); + testCandidate.getSetupSection().getPrerequisiteSection().evaluate(restTestExecutionContext, testCandidate.getSuitePath()); + testCandidate.getTeardownSection().getPrerequisiteSection().evaluate(restTestExecutionContext, testCandidate.getSuitePath()); // skip test if test section is disabled - assumeFalse( - testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), - testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext) - ); + testCandidate.getTestSection().getPrerequisiteSection().evaluate(restTestExecutionContext, testCandidate.getTestPath()); // let's check that there is something to run, otherwise there might be a problem with the test section if (testCandidate.getTestSection().getExecutableSections().isEmpty()) { @@ -468,11 +458,11 @@ public void test() throws IOException { assumeFalse( "[" + testCandidate.getTestPath() + "] skipped, reason: in fips 140 mode", - inFipsJvm() && testCandidate.getTestSection().getSkipSection().yamlRunnerHasFeature("fips_140") + inFipsJvm() && testCandidate.getTestSection().getPrerequisiteSection().hasYamlRunnerFeature("fips_140") ); final Settings globalTemplateSettings = getGlobalTemplateSettings( - testCandidate.getTestSection().getSkipSection().yamlRunnerHasFeature("default_shards") + testCandidate.getTestSection().getPrerequisiteSection().hasYamlRunnerFeature("default_shards") ); if (globalTemplateSettings.isEmpty() == false && ESRestTestCase.has(ProductFeature.LEGACY_TEMPLATES)) { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java index 740befe2f3a6a..f679a725c4feb 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSection.java @@ -28,7 +28,7 @@ public static ClientYamlTestSection parse(XContentParser parser) throws IOExcept List executableSections = new ArrayList<>(); try { parser.nextToken(); - SkipSection skipSection = SkipSection.parseIfNext(parser); + PrerequisiteSection prerequisiteSection = PrerequisiteSection.parseIfNext(parser); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); executableSections.add(ExecutableSection.parse(parser)); @@ -45,7 +45,7 @@ public static ClientYamlTestSection parse(XContentParser parser) throws IOExcept ); } parser.nextToken(); - return new ClientYamlTestSection(sectionLocation, sectionName, skipSection, executableSections); + return new 
ClientYamlTestSection(sectionLocation, sectionName, prerequisiteSection, executableSections); } catch (Exception e) { throw new ParsingException(parser.getTokenLocation(), "Error parsing test named [" + sectionName + "]", e); } @@ -53,18 +53,18 @@ public static ClientYamlTestSection parse(XContentParser parser) throws IOExcept private final XContentLocation location; private final String name; - private final SkipSection skipSection; + private final PrerequisiteSection prerequisiteSection; private final List executableSections; public ClientYamlTestSection( XContentLocation location, String name, - SkipSection skipSection, + PrerequisiteSection prerequisiteSection, List executableSections ) { this.location = location; this.name = name; - this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + this.prerequisiteSection = Objects.requireNonNull(prerequisiteSection, "skip section cannot be null"); this.executableSections = Collections.unmodifiableList(executableSections); } @@ -76,8 +76,8 @@ public String getName() { return name; } - public SkipSection getSkipSection() { - return skipSection; + public PrerequisiteSection getPrerequisiteSection() { + return prerequisiteSection; } public List getExecutableSections() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 48f24d3a935af..e5f46ff135171 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -175,9 +175,9 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof DoSection) .map(section -> (DoSection) section) .filter(section -> false == section.getExpectedWarningHeaders().isEmpty()) - .filter(section -> false == hasSkipFeature("warnings", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())); @@ -187,10 +187,10 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof DoSection) .map(section -> (DoSection) section) .filter(section -> false == section.getExpectedWarningHeadersRegex().isEmpty()) - .filter(section -> false == hasSkipFeature("warnings_regex", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [warnings_regex] section without a corresponding \ - ["skip": "features": "warnings_regex"] so runners that do not support the [warnings_regex] \ + ["requires": "test_runner_features": "warnings_regex"] so runners that do not support the [warnings_regex] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -201,10 +201,10 @@ private static Stream validateExecutableSections( 
.filter(section -> section instanceof DoSection) .map(section -> (DoSection) section) .filter(section -> false == section.getAllowedWarningHeaders().isEmpty()) - .filter(section -> false == hasSkipFeature("allowed_warnings", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("allowed_warnings", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings] section without a corresponding \ - ["skip": "features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ + ["requires": "test_runner_features": "allowed_warnings"] so runners that do not support the [allowed_warnings] \ section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -215,11 +215,11 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof DoSection) .map(section -> (DoSection) section) .filter(section -> false == section.getAllowedWarningHeadersRegex().isEmpty()) - .filter(section -> false == hasSkipFeature("allowed_warnings_regex", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("allowed_warnings_regex", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding \ - ["skip": "features": "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] \ - section can skip the test at line [%d]\ + ["requires": "test_runner_features": "allowed_warnings_regex"] so runners that do not support the \ + [allowed_warnings_regex] section can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -229,10 +229,10 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof DoSection) .map(section -> (DoSection) section) .filter(section -> NodeSelector.ANY != section.getApiCallSection().getNodeSelector()) - .filter(section -> false == hasSkipFeature("node_selector", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("node_selector", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ attempted to add a [do] with a [node_selector] section without a corresponding \ - ["skip": "features": "node_selector"] so runners that do not support the [node_selector] section \ + ["requires": "test_runner_features": "node_selector"] so runners that do not support the [node_selector] section \ can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -241,9 +241,9 @@ private static Stream validateExecutableSections( errors, sections.stream() .filter(section -> section instanceof ContainsAssertion) - .filter(section -> false == hasSkipFeature("contains", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("contains", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -254,10 +254,11 @@ private static Stream validateExecutableSections( .filter(section -> section instanceof 
DoSection) .map(section -> (DoSection) section) .filter(section -> false == section.getApiCallSection().getHeaders().isEmpty()) - .filter(section -> false == hasSkipFeature("headers", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("headers", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ - so runners that do not support the [headers] section can skip the test at line [%d]\ + attempted to add a [do] with a [headers] section without a corresponding \ + ["requires": "test_runner_features": "headers"] so runners that do not support the [headers] section \ + can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -265,9 +266,9 @@ private static Stream validateExecutableSections( errors, sections.stream() .filter(section -> section instanceof CloseToAssertion) - .filter(section -> false == hasSkipFeature("close_to", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("close_to", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add a [close_to] assertion without a corresponding ["skip": "features": "close_to"] \ + attempted to add a [close_to] assertion without a corresponding ["requires": "test_runner_features": "close_to"] \ so runners that do not support the [close_to] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -276,9 +277,9 @@ private static Stream validateExecutableSections( errors, sections.stream() .filter(section -> section instanceof IsAfterAssertion) - .filter(section -> false == hasSkipFeature("is_after", testSection, setupSection, teardownSection)) + .filter(section -> false == hasYamlRunnerFeature("is_after", testSection, setupSection, teardownSection)) .map(section -> String.format(Locale.ROOT, """ - attempted to add an [is_after] assertion without a corresponding ["skip": "features": "is_after"] \ + attempted to add an [is_after] assertion without a corresponding ["requires": "test_runner_features": "is_after"] \ so runners that do not support the [is_after] assertion can skip the test at line [%d]\ """, section.getLocation().lineNumber())) ); @@ -286,19 +287,19 @@ private static Stream validateExecutableSections( return errors; } - private static boolean hasSkipFeature( + private static boolean hasYamlRunnerFeature( String feature, ClientYamlTestSection testSection, SetupSection setupSection, TeardownSection teardownSection ) { - return (testSection != null && hasSkipFeature(feature, testSection.getSkipSection())) - || (setupSection != null && hasSkipFeature(feature, setupSection.getSkipSection())) - || (teardownSection != null && hasSkipFeature(feature, teardownSection.getSkipSection())); + return (testSection != null && hasYamlRunnerFeature(feature, testSection.getPrerequisiteSection())) + || (setupSection != null && hasYamlRunnerFeature(feature, setupSection.getPrerequisiteSection())) + || (teardownSection != null && hasYamlRunnerFeature(feature, teardownSection.getPrerequisiteSection())); } - private static boolean hasSkipFeature(String feature, SkipSection skipSection) { - return skipSection != null && skipSection.yamlRunnerHasFeature(feature); + private static boolean hasYamlRunnerFeature(String feature, PrerequisiteSection prerequisiteSection) { + return prerequisiteSection != null && 
prerequisiteSection.hasYamlRunnerFeature(feature); } public List getTestSections() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java new file mode 100644 index 0000000000000..0c11c02110cab --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSection.java @@ -0,0 +1,413 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.test.rest.yaml.section; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.test.rest.yaml.Features; +import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParser; +import org.junit.AssumptionViolatedException; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.function.Predicate; + +/** + * Represents a section where prerequisites to run a specific test section or suite are specified. It is possible to specify preconditions + * as a set of `skip` criteria (the test or suite will be skipped if the specified conditions are met) or `requires` criteria (the test or + * suite will be run only if the specified conditions are met) + * Criteria are based on: + * - the elasticsearch cluster version the tests are running against (deprecated) + * - the features supported by the elasticsearch cluster version the tests are running against + * - a specific test runner feature - some runners may not implement the whole set of features + * - an operating system (full name, including specific Linux distributions) - some OS might show a certain behavior + */ +public class PrerequisiteSection { + + private static final Logger logger = LogManager.getLogger(PrerequisiteSection.class); + + static class PrerequisiteSectionBuilder { + String skipVersionRange = null; + String skipReason = null; + String requiresReason = null; + List requiredYamlRunnerFeatures = new ArrayList<>(); + List skipOperatingSystems = new ArrayList<>(); + + Set skipClusterFeatures = new HashSet<>(); + Set requiredClusterFeatures = new HashSet<>(); + + enum XPackRequired { + NOT_SPECIFIED, + YES, + NO, + MISMATCHED + } + + XPackRequired xpackRequired = XPackRequired.NOT_SPECIFIED; + + public PrerequisiteSectionBuilder skipIfVersion(String skipVersionRange) { + this.skipVersionRange = skipVersionRange; + return this; + } + + public PrerequisiteSectionBuilder setSkipReason(String skipReason) { + this.skipReason = skipReason; + return this; + } + + public PrerequisiteSectionBuilder setRequiresReason(String requiresReason) { + this.requiresReason = requiresReason; + return this; + } + + public PrerequisiteSectionBuilder requireYamlRunnerFeature(String featureName) { + requiredYamlRunnerFeatures.add(featureName); + return this; + } + + public 
PrerequisiteSectionBuilder requireXPack() { + if (xpackRequired == XPackRequired.NO) { + xpackRequired = XPackRequired.MISMATCHED; + } else { + xpackRequired = XPackRequired.YES; + } + return this; + } + + public PrerequisiteSectionBuilder skipIfXPack() { + if (xpackRequired == XPackRequired.YES) { + xpackRequired = XPackRequired.MISMATCHED; + } else { + xpackRequired = XPackRequired.NO; + } + return this; + } + + public PrerequisiteSectionBuilder skipIfClusterFeature(String featureName) { + skipClusterFeatures.add(featureName); + return this; + } + + public PrerequisiteSectionBuilder requireClusterFeature(String featureName) { + requiredClusterFeatures.add(featureName); + return this; + } + + public PrerequisiteSectionBuilder skipIfOs(String osName) { + this.skipOperatingSystems.add(osName); + return this; + } + + void validate(XContentLocation contentLocation) { + if ((Strings.hasLength(skipVersionRange) == false) + && requiredYamlRunnerFeatures.isEmpty() + && skipOperatingSystems.isEmpty() + && xpackRequired == XPackRequired.NOT_SPECIFIED + && requiredClusterFeatures.isEmpty() + && skipClusterFeatures.isEmpty()) { + throw new ParsingException( + contentLocation, + "at least one criterion (version, cluster features, runner features, os) is mandatory within a skip section" + ); + } + if (Strings.hasLength(skipVersionRange) && Strings.hasLength(skipReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); + } + if (skipOperatingSystems.isEmpty() == false && Strings.hasLength(skipReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip os section"); + } + if (skipClusterFeatures.isEmpty() == false && Strings.hasLength(skipReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within skip cluster_features section"); + } + if (requiredClusterFeatures.isEmpty() == false && Strings.hasLength(requiresReason) == false) { + throw new ParsingException(contentLocation, "reason is mandatory within requires cluster_features section"); + } + // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os + if (skipOperatingSystems.isEmpty() == false && requiredYamlRunnerFeatures.contains("skip_os") == false) { + throw new ParsingException(contentLocation, "if os is specified, test runner feature [skip_os] must be set"); + } + if (xpackRequired == XPackRequired.MISMATCHED) { + throw new ParsingException(contentLocation, "either [xpack] or [no_xpack] can be present, not both"); + } + if (Sets.haveNonEmptyIntersection(skipClusterFeatures, requiredClusterFeatures)) { + throw new ParsingException(contentLocation, "a cluster feature can be specified either in [requires] or [skip], not both"); + } + } + + public PrerequisiteSection build() { + final List<Predicate<ClientYamlTestExecutionContext>> skipCriteriaList = new ArrayList<>(); + final List<Predicate<ClientYamlTestExecutionContext>> requiresCriteriaList; + + // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this + // section.
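+ // An always-false requires criterion can never be met, so evaluate() reports the section as unmet and
+ // skips the test, rather than failing outright on a runner feature this framework does not support.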
+ if (Features.areAllSupported(requiredYamlRunnerFeatures) == false) { + requiresCriteriaList = List.of(Prerequisites.FALSE); + } else { + requiresCriteriaList = new ArrayList<>(); + if (xpackRequired == XPackRequired.YES) { + requiresCriteriaList.add(Prerequisites.hasXPack()); + } + if (xpackRequired == XPackRequired.NO) { + skipCriteriaList.add(Prerequisites.hasXPack()); + } + if (Strings.hasLength(skipVersionRange)) { + skipCriteriaList.add(Prerequisites.skipOnVersionRange(skipVersionRange)); + } + if (skipOperatingSystems.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnOsList(skipOperatingSystems)); + } + if (requiredClusterFeatures.isEmpty() == false) { + requiresCriteriaList.add(Prerequisites.requireClusterFeatures(requiredClusterFeatures)); + } + if (skipClusterFeatures.isEmpty() == false) { + skipCriteriaList.add(Prerequisites.skipOnClusterFeatures(skipClusterFeatures)); + } + } + return new PrerequisiteSection(skipCriteriaList, skipReason, requiresCriteriaList, requiresReason, requiredYamlRunnerFeatures); + } + } + + /** + * Parse a {@link PrerequisiteSection} if the next fields are {@code skip} and/or {@code requires}, otherwise returns an empty section. + */ + public static PrerequisiteSection parseIfNext(XContentParser parser) throws IOException { + return parseInternal(parser).build(); + } + + private static void maybeAdvanceToNextField(XContentParser parser) throws IOException { + var token = parser.nextToken(); + if (token != null && token != XContentParser.Token.END_ARRAY) { + ParserUtils.advanceToFieldName(parser); + } + } + + static PrerequisiteSectionBuilder parseInternal(XContentParser parser) throws IOException { + PrerequisiteSectionBuilder builder = new PrerequisiteSectionBuilder(); + var hasPrerequisiteSection = false; + var unknownFieldName = false; + ParserUtils.advanceToFieldName(parser); + while (unknownFieldName == false) { + if ("skip".equals(parser.currentName())) { + parseSkipSection(parser, builder); + hasPrerequisiteSection = true; + maybeAdvanceToNextField(parser); + } else if ("requires".equals(parser.currentName())) { + parseRequiresSection(parser, builder); + hasPrerequisiteSection = true; + maybeAdvanceToNextField(parser); + } else { + unknownFieldName = true; + } + } + if (hasPrerequisiteSection) { + builder.validate(parser.getTokenLocation()); + } + return builder; + } + + private static void parseFeatureField(String feature, PrerequisiteSectionBuilder builder) { + // #31403 introduced YAML test "features" to indicate if the cluster being tested has xpack installed (`xpack`) + // or if it does *not* have xpack installed (`no_xpack`). These are not test runner features, so now that we have + // "modular" skip criteria let's separate them. Eventually, these should move to their own skip section.
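+ // e.g. a legacy `features: [xpack, warnings]` entry routes `xpack` to an xpack prerequisite below and
+ // `warnings` to a plain test-runner feature requirement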
+ if (feature.equals("xpack")) { + builder.requireXPack(); + } else if (feature.equals("no_xpack")) { + builder.skipIfXPack(); + } else { + builder.requireYamlRunnerFeature(feature); + } + } + + // package private for tests + static void parseSkipSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException( + "Expected [" + + XContentParser.Token.START_OBJECT + + ", found [" + + parser.currentToken() + + "], the skip section is not properly indented" + ); + } + String currentFieldName = null; + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if ("version".equals(currentFieldName)) { + builder.skipIfVersion(parser.text()); + } else if ("reason".equals(currentFieldName)) { + builder.setSkipReason(parser.text()); + } else if ("features".equals(currentFieldName)) { + // TODO: legacy - remove + logger.warn( + "[\"skip\": \"features\"] is deprecated and will be removed. Replace it with " + + "[\"requires\": \"test_runner_features\"]" + ); + parseFeatureField(parser.text(), builder); + } else if ("os".equals(currentFieldName)) { + builder.skipIfOs(parser.text()); + } else if ("cluster_features".equals(currentFieldName)) { + builder.skipIfClusterFeature(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "field " + currentFieldName + " not supported within skip section" + ); + } + } else if (token == XContentParser.Token.START_ARRAY) { + // TODO: legacy - remove + logger.warn( + "[\"skip\": \"features\"] is deprecated and will be removed. 
Replace it with " + + "[\"requires\": \"test_runner_features\"]" + ); + if ("features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + parseFeatureField(parser.text(), builder); + } + } else if ("os".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.skipIfOs(parser.text()); + } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.skipIfClusterFeature(parser.text()); + } + } + } + } + parser.nextToken(); + } + + static void parseRequiresSection(XContentParser parser, PrerequisiteSectionBuilder builder) throws IOException { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException( + "Expected [" + + XContentParser.Token.START_OBJECT + + ", found [" + + parser.currentToken() + + "], the requires section is not properly indented" + ); + } + String currentFieldName = null; + XContentParser.Token token; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if ("reason".equals(currentFieldName)) { + builder.setRequiresReason(parser.text()); + } else if ("test_runner_features".equals(currentFieldName)) { + parseFeatureField(parser.text(), builder); + } else if ("cluster_features".equals(currentFieldName)) { + builder.requireClusterFeature(parser.text()); + } else { + throw new ParsingException( + parser.getTokenLocation(), + "field " + currentFieldName + " not supported within requires section" + ); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("test_runner_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + parseFeatureField(parser.text(), builder); + } + } else if ("cluster_features".equals(currentFieldName)) { + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + builder.requireClusterFeature(parser.text()); + } + } + } + } + parser.nextToken(); + } + + public static final PrerequisiteSection EMPTY = new PrerequisiteSection(); + + private final List> skipCriteriaList; + private final List> requiresCriteriaList; + private final List yamlRunnerFeatures; + final String skipReason; + final String requireReason; + + private PrerequisiteSection() { + this.skipCriteriaList = new ArrayList<>(); + this.requiresCriteriaList = new ArrayList<>(); + this.yamlRunnerFeatures = new ArrayList<>(); + this.skipReason = null; + this.requireReason = null; + } + + PrerequisiteSection( + List> skipCriteriaList, + String skipReason, + List> requiresCriteriaList, + String requireReason, + List yamlRunnerFeatures + ) { + this.skipCriteriaList = skipCriteriaList; + this.requiresCriteriaList = requiresCriteriaList; + this.yamlRunnerFeatures = yamlRunnerFeatures; + this.skipReason = skipReason; + this.requireReason = requireReason; + } + + public boolean hasYamlRunnerFeature(String feature) { + return yamlRunnerFeatures.contains(feature); + } + + boolean skipCriteriaMet(ClientYamlTestExecutionContext context) { + return skipCriteriaList.stream().anyMatch(c -> c.test(context)); + } + + boolean requiresCriteriaMet(ClientYamlTestExecutionContext context) { + return requiresCriteriaList.stream().allMatch(c -> c.test(context)); + } + + public void evaluate(ClientYamlTestExecutionContext context, String testCandidateDescription) { + if (isEmpty()) { + return; + } + + if 
(requiresCriteriaMet(context) == false) { + throw new AssumptionViolatedException(buildMessage(testCandidateDescription, false)); + } + + if (skipCriteriaMet(context)) { + throw new AssumptionViolatedException(buildMessage(testCandidateDescription, true)); + } + } + + boolean isEmpty() { + return skipCriteriaList.isEmpty() && requiresCriteriaList.isEmpty() && yamlRunnerFeatures.isEmpty(); + } + + String buildMessage(String description, boolean isSkip) { + StringBuilder messageBuilder = new StringBuilder(); + messageBuilder.append("[").append(description).append("] skipped,"); + var reason = isSkip ? skipReason : requireReason; + if (Strings.isNullOrEmpty(reason) == false) { + messageBuilder.append(" reason: [").append(reason).append("]"); + } + if (yamlRunnerFeatures.isEmpty() == false) { + messageBuilder.append(" unsupported features ").append(yamlRunnerFeatures); + } + return messageBuilder.toString(); + } +} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java similarity index 54% rename from test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java rename to test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java index c864c778a8e76..8049c227b199e 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipCriteria.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/Prerequisites.java @@ -12,31 +12,36 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import java.util.List; +import java.util.Set; import java.util.function.Predicate; -public class SkipCriteria { +public class Prerequisites { - public static final Predicate SKIP_ALWAYS = context -> true; + public static final Predicate TRUE = context -> true; + public static final Predicate FALSE = context -> false; - private SkipCriteria() {} + private Prerequisites() {} - static Predicate fromVersionRange(String versionRange) { + static Predicate skipOnVersionRange(String versionRange) { final var versionRangePredicates = VersionRange.parseVersionRanges(versionRange); assert versionRangePredicates.isEmpty() == false; return context -> versionRangePredicates.stream().anyMatch(range -> range.test(context.nodesVersions())); } - static Predicate fromOsList(List operatingSystems) { + static Predicate skipOnOsList(List operatingSystems) { return context -> operatingSystems.stream().anyMatch(osName -> osName.equals(context.os())); } - static Predicate fromClusterModules(boolean xpackRequired) { + static Predicate hasXPack() { // TODO: change ESRestTestCase.hasXPack() to be context-specific - return context -> { - if (xpackRequired) { - return ESRestTestCase.hasXPack() == false; - } - return ESRestTestCase.hasXPack(); - }; + return context -> ESRestTestCase.hasXPack(); + } + + static Predicate requireClusterFeatures(Set clusterFeatures) { + return context -> clusterFeatures.stream().allMatch(context::clusterHasFeature); + } + + static Predicate skipOnClusterFeatures(Set clusterFeatures) { + return context -> clusterFeatures.stream().anyMatch(context::clusterHasFeature); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java index 351cfa8e40ebc..ecf37c4b5cf64 100644 --- 
a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SetupSection.java @@ -36,7 +36,7 @@ static SetupSection parseIfNext(XContentParser parser) throws IOException { } public static SetupSection parse(XContentParser parser) throws IOException { - SkipSection skipSection = SkipSection.parseIfNext(parser); + PrerequisiteSection prerequisiteSection = PrerequisiteSection.parseIfNext(parser); List executableSections = new ArrayList<>(); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); @@ -51,21 +51,21 @@ public static SetupSection parse(XContentParser parser) throws IOException { parser.nextToken(); } parser.nextToken(); - return new SetupSection(skipSection, executableSections); + return new SetupSection(prerequisiteSection, executableSections); } - public static final SetupSection EMPTY = new SetupSection(SkipSection.EMPTY, Collections.emptyList()); + public static final SetupSection EMPTY = new SetupSection(PrerequisiteSection.EMPTY, Collections.emptyList()); - private final SkipSection skipSection; + private final PrerequisiteSection prerequisiteSection; private final List executableSections; - public SetupSection(SkipSection skipSection, List executableSections) { - this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + public SetupSection(PrerequisiteSection prerequisiteSection, List executableSections) { + this.prerequisiteSection = Objects.requireNonNull(prerequisiteSection, "skip section cannot be null"); this.executableSections = Collections.unmodifiableList(executableSections); } - public SkipSection getSkipSection() { - return skipSection; + public PrerequisiteSection getPrerequisiteSection() { + return prerequisiteSection; } public List getExecutableSections() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java deleted file mode 100644 index 4bd80fa4d9f13..0000000000000 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/SkipSection.java +++ /dev/null @@ -1,255 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ -package org.elasticsearch.test.rest.yaml.section; - -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.Strings; -import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; -import org.elasticsearch.test.rest.yaml.Features; -import org.elasticsearch.xcontent.XContentLocation; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Predicate; - -/** - * Represents a skip section that tells whether a specific test section or suite needs to be skipped - * based on: - * - the elasticsearch version the tests are running against - * - a specific test feature required that might not be implemented yet by the runner - * - an operating system (full name, including specific Linux distributions) that might show a certain behavior - */ -public class SkipSection { - - static class SkipSectionBuilder { - String version = null; - String reason = null; - List testFeatures = new ArrayList<>(); - List operatingSystems = new ArrayList<>(); - - enum XPackRequested { - NOT_SPECIFIED, - YES, - NO, - MISMATCHED - } - - XPackRequested xpackRequested = XPackRequested.NOT_SPECIFIED; - - public SkipSectionBuilder withVersion(String version) { - this.version = version; - return this; - } - - public SkipSectionBuilder withReason(String reason) { - this.reason = reason; - return this; - } - - public SkipSectionBuilder withTestFeature(String featureName) { - this.testFeatures.add(featureName); - return this; - } - - public void withXPack(boolean xpackRequired) { - if (xpackRequired && xpackRequested == XPackRequested.NO || xpackRequired == false && xpackRequested == XPackRequested.YES) { - xpackRequested = XPackRequested.MISMATCHED; - } else { - xpackRequested = xpackRequired ? XPackRequested.YES : XPackRequested.NO; - } - } - - public SkipSectionBuilder withOs(String osName) { - this.operatingSystems.add(osName); - return this; - } - - void validate(XContentLocation contentLocation) { - if ((Strings.hasLength(version) == false) - && testFeatures.isEmpty() - && operatingSystems.isEmpty() - && xpackRequested == XPackRequested.NOT_SPECIFIED) { - throw new ParsingException( - contentLocation, - "at least one criteria (version, test features, os) is mandatory within a skip section" - ); - } - if (Strings.hasLength(version) && Strings.hasLength(reason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); - } - if (operatingSystems.isEmpty() == false && Strings.hasLength(reason) == false) { - throw new ParsingException(contentLocation, "reason is mandatory within skip version section"); - } - // make feature "skip_os" mandatory if os is given, this is a temporary solution until language client tests know about os - if (operatingSystems.isEmpty() == false && testFeatures.contains("skip_os") == false) { - throw new ParsingException(contentLocation, "if os is specified, feature skip_os must be set"); - } - if (xpackRequested == XPackRequested.MISMATCHED) { - throw new ParsingException(contentLocation, "either `xpack` or `no_xpack` can be present, not both"); - } - } - - public SkipSection build() { - final List> skipCriteriaList; - - // Check if the test runner supports all YAML framework features (see {@link Features}). If not, default to always skip this - // section. 
- if (Features.areAllSupported(testFeatures) == false) { - skipCriteriaList = List.of(SkipCriteria.SKIP_ALWAYS); - } else { - skipCriteriaList = new ArrayList<>(); - if (xpackRequested == XPackRequested.YES || xpackRequested == XPackRequested.NO) { - skipCriteriaList.add(SkipCriteria.fromClusterModules(xpackRequested == XPackRequested.YES)); - } - if (Strings.hasLength(version)) { - skipCriteriaList.add(SkipCriteria.fromVersionRange(version)); - } - if (operatingSystems.isEmpty() == false) { - skipCriteriaList.add(SkipCriteria.fromOsList(operatingSystems)); - } - } - return new SkipSection(skipCriteriaList, testFeatures, reason); - } - } - - /** - * Parse a {@link SkipSection} if the next field is {@code skip}, otherwise returns {@link SkipSection#EMPTY}. - */ - public static SkipSection parseIfNext(XContentParser parser) throws IOException { - ParserUtils.advanceToFieldName(parser); - - if ("skip".equals(parser.currentName())) { - SkipSection section = parse(parser); - parser.nextToken(); - return section; - } - - return EMPTY; - } - - public static SkipSection parse(XContentParser parser) throws IOException { - return parseInternal(parser).build(); - } - - private static void parseFeature(String feature, SkipSectionBuilder builder) { - // #31403 introduced YAML test "features" to indicate if the cluster being tested has xpack installed (`xpack`) - // or if it does *not* have xpack installed (`no_xpack`). These are not test runner features, so now that we have - // "modular" skip criteria let's separate them. Eventually, these should move to their own skip section. - if (feature.equals("xpack")) { - builder.withXPack(true); - } else if (feature.equals("no_xpack")) { - builder.withXPack(false); - } else { - builder.withTestFeature(feature); - } - } - - // package private for tests - static SkipSectionBuilder parseInternal(XContentParser parser) throws IOException { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new IllegalArgumentException( - "Expected [" - + XContentParser.Token.START_OBJECT - + ", found [" - + parser.currentToken() - + "], the skip section is not properly indented" - ); - } - String currentFieldName = null; - XContentParser.Token token; - - var builder = new SkipSectionBuilder(); - - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("version".equals(currentFieldName)) { - builder.withVersion(parser.text()); - } else if ("reason".equals(currentFieldName)) { - builder.withReason(parser.text()); - } else if ("features".equals(currentFieldName)) { - parseFeature(parser.text(), builder); - } else if ("os".equals(currentFieldName)) { - builder.withOs(parser.text()); - } else { - throw new ParsingException( - parser.getTokenLocation(), - "field " + currentFieldName + " not supported within skip section" - ); - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("features".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - parseFeature(parser.text(), builder); - } - } else if ("os".equals(currentFieldName)) { - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - builder.withOs(parser.text()); - } - } - } - } - - parser.nextToken(); - builder.validate(parser.getTokenLocation()); - return builder; - } - - public static final SkipSection EMPTY = new SkipSection(); - - private final List> skipCriteriaList; - private final List 
yamlRunnerFeatures; - private final String reason; - - private SkipSection() { - this.skipCriteriaList = new ArrayList<>(); - this.yamlRunnerFeatures = new ArrayList<>(); - this.reason = null; - } - - SkipSection(List> skipCriteriaList, List yamlRunnerFeatures, String reason) { - this.skipCriteriaList = skipCriteriaList; - this.yamlRunnerFeatures = yamlRunnerFeatures; - this.reason = reason; - } - - public boolean yamlRunnerHasFeature(String feature) { - return yamlRunnerFeatures.contains(feature); - } - - public String getReason() { - return reason; - } - - public boolean skip(ClientYamlTestExecutionContext context) { - if (isEmpty()) { - return false; - } - - return skipCriteriaList.stream().anyMatch(c -> c.test(context)); - } - - public boolean isEmpty() { - return EMPTY.equals(this); - } - - public String getSkipMessage(String description) { - StringBuilder messageBuilder = new StringBuilder(); - messageBuilder.append("[").append(description).append("] skipped,"); - if (reason != null) { - messageBuilder.append(" reason: [").append(getReason()).append("]"); - } - if (yamlRunnerFeatures.isEmpty() == false) { - messageBuilder.append(" unsupported features ").append(yamlRunnerFeatures); - } - return messageBuilder.toString(); - } -} diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java index 6821378463749..ca76ee92bb3c5 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/TeardownSection.java @@ -35,7 +35,7 @@ static TeardownSection parseIfNext(XContentParser parser) throws IOException { } public static TeardownSection parse(XContentParser parser) throws IOException { - SkipSection skipSection = SkipSection.parseIfNext(parser); + PrerequisiteSection prerequisiteSection = PrerequisiteSection.parseIfNext(parser); List executableSections = new ArrayList<>(); while (parser.currentToken() != XContentParser.Token.END_ARRAY) { ParserUtils.advanceToFieldName(parser); @@ -50,21 +50,21 @@ public static TeardownSection parse(XContentParser parser) throws IOException { } parser.nextToken(); - return new TeardownSection(skipSection, executableSections); + return new TeardownSection(prerequisiteSection, executableSections); } - public static final TeardownSection EMPTY = new TeardownSection(SkipSection.EMPTY, Collections.emptyList()); + public static final TeardownSection EMPTY = new TeardownSection(PrerequisiteSection.EMPTY, Collections.emptyList()); - private final SkipSection skipSection; + private final PrerequisiteSection prerequisiteSection; private final List doSections; - TeardownSection(SkipSection skipSection, List doSections) { - this.skipSection = Objects.requireNonNull(skipSection, "skip section cannot be null"); + TeardownSection(PrerequisiteSection prerequisiteSection, List doSections) { + this.prerequisiteSection = Objects.requireNonNull(prerequisiteSection, "skip section cannot be null"); this.doSections = Collections.unmodifiableList(doSections); } - public SkipSection getSkipSection() { - return skipSection; + public PrerequisiteSection getPrerequisiteSection() { + return prerequisiteSection; } public List getDoSections() { diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java 
b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 0ee275fc89c15..2c6e7e30e0d46 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -69,7 +69,7 @@ public void testParseTestSectionWithDoSection() throws Exception { assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); - assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY)); + assertThat(testSection.getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(testSection.getExecutableSections().size(), equalTo(1)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); @@ -96,8 +96,8 @@ public void testParseTestSectionWithDoSetAndSkipSectionsNoSkip() throws Exceptio assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); - assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); + assertThat(testSection.getPrerequisiteSection(), notNullValue()); + assertThat(testSection.getPrerequisiteSection().skipReason, equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); @@ -130,7 +130,7 @@ public void testParseTestSectionWithMultipleDoSections() throws Exception { assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("Basic")); - assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY)); + assertThat(testSection.getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); assertThat(doSection.getCatch(), nullValue()); @@ -181,7 +181,7 @@ public void testParseTestSectionWithDoSectionsAndAssertions() throws Exception { assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("Basic")); - assertThat(testSection.getSkipSection(), equalTo(SkipSection.EMPTY)); + assertThat(testSection.getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(testSection.getExecutableSections().size(), equalTo(10)); DoSection doSection = (DoSection) testSection.getExecutableSections().get(0); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index c64a30378e9d6..1f5bdc71dde37 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -34,6 +34,51 @@ import static org.hamcrest.Matchers.nullValue; public class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentParserTestCase { + + public void testParseTestSetupWithSkip() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + --- + setup: + - skip: + version: 
"8.7.00 - 8.9.99" + reason: "Synthetic source shows up in the mapping in 8.10 and on, may trigger assert failures in mixed cluster tests" + + --- + date: + - skip: + version: " - 8.1.99" + reason: tsdb indexing changed in 8.2.0 + - do: + indices.get_mapping: + index: test_index + + - match: {test_index.test_type.properties.text.type: string} + - match: {test_index.test_type.properties.text.analyzer: whitespace} + """); + + ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getSetupSection(), notNullValue()); + + assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getSetupSection().getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getSetupSection().getExecutableSections().isEmpty(), equalTo(true)); + + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("date")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3)); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); + DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); + assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping")); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1)); + assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); + } + public void testParseTestSetupTeardownAndSections() throws Exception { final boolean includeSetup = randomBoolean(); final boolean includeTeardown = randomBoolean(); @@ -92,7 +137,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getSetupSection(), notNullValue()); if (includeSetup) { assertThat(restTestSuite.getSetupSection().isEmpty(), equalTo(false)); - assertThat(restTestSuite.getSetupSection().getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getSetupSection().getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getSetupSection().getExecutableSections().size(), equalTo(1)); final ExecutableSection maybeDoSection = restTestSuite.getSetupSection().getExecutableSections().get(0); assertThat(maybeDoSection, instanceOf(DoSection.class)); @@ -107,7 +152,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTeardownSection(), notNullValue()); if (includeTeardown) { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(false)); - assertThat(restTestSuite.getTeardownSection().getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTeardownSection().getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTeardownSection().getDoSections().size(), equalTo(1)); assertThat( ((DoSection) restTestSuite.getTeardownSection().getDoSections().get(0)).getApiCallSection().getApi(), @@ -128,7 +173,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTestSections().size(), equalTo(2)); 
assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); @@ -145,9 +190,9 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 6.0")); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getTestSections().get(1).getPrerequisiteSection().isEmpty(), equalTo(false)); assertThat( - restTestSuite.getTestSections().get(1).getSkipSection().getReason(), + restTestSuite.getTestSections().get(1).getPrerequisiteSection().skipReason, equalTo("for newer versions the index name is always returned") ); @@ -209,7 +254,7 @@ public void testParseTestSingleTestSection() throws Exception { assertThat(restTestSuite.getTestSections().size(), equalTo(1)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); @@ -322,7 +367,7 @@ public void testParseTestMultipleTestSections() throws Exception { assertThat(restTestSuite.getTestSections().size(), equalTo(2)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Missing document (partial doc)")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(2)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); @@ -339,7 +384,7 @@ public void testParseTestMultipleTestSections() throws Exception { assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)")); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(1).getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(2)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(1), instanceOf(DoSection.class)); @@ -418,9 +463,44 @@ public void testParseSkipOs() throws 
Exception { assertThat(restTestSuite.getTestSections().size(), equalTo(1)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Broken on some os")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(false)); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().getReason(), equalTo("not supported")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().yamlRunnerHasFeature("skip_os"), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().skipReason, containsString("not supported")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().hasYamlRunnerFeature("skip_os"), equalTo(true)); + } + + public void testParseSkipAndRequireClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + "Broken on some os": + + - skip: + cluster_features: [unsupported-feature1, unsupported-feature2] + reason: "unsupported-features are not supported" + - requires: + cluster_features: required-feature1 + reason: "required-feature1 is required" + - do: + indices.get_mapping: + index: test_index + type: test_type + + - match: {test_type.properties.text.type: string} + - match: {test_type.properties.text.analyzer: whitespace} + """); + + ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), Optional.empty(), parser); + + assertThat(restTestSuite, notNullValue()); + assertThat(restTestSuite.getName(), equalTo(getTestName())); + assertThat(restTestSuite.getFile().isPresent(), equalTo(false)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); + + assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Broken on some os")); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat( + restTestSuite.getTestSections().get(0).getPrerequisiteSection().skipReason, + equalTo("unsupported-features are not supported") + ); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().requireReason, equalTo("required-feature1 is required")); } public void testParseFileWithSingleTestSection() throws Exception { @@ -453,7 +533,7 @@ public void testParseFileWithSingleTestSection() throws Exception { assertThat(restTestSuite.getTestSections().size(), equalTo(1)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID")); - assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); + assertThat(restTestSuite.getTestSections().get(0).getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(2)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); @@ -473,7 +553,7 @@ public void testAddingDoWithoutSkips() { ClientYamlTestSection section = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, Collections.singletonList(doSection) ); ClientYamlTestSuite clientYamlTestSuite = new ClientYamlTestSuite( @@ -492,11 +572,11 @@ public void testAddingDoWithWarningWithoutSkipWarnings() { DoSection doSection = new DoSection(new 
XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); doSection.setApiCallSection(new ApiCallSection("test")); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] \ + attempted to add a [do] with a [warnings] section without a corresponding ["requires": "test_runner_features": "warnings"] \ so runners that do not support the [warnings] section can skip the test at line [%d]\ """, lineNumber))); } @@ -506,11 +586,12 @@ public void testAddingDoWithWarningRegexWithoutSkipWarnings() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeadersRegex(singletonList(Pattern.compile("foo"))); doSection.setApiCallSection(new ApiCallSection("test")); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [warnings_regex] section without a corresponding ["skip": "features": "warnings_regex"] \ + attempted to add a [do] with a [warnings_regex] section without a corresponding \ + ["requires": "test_runner_features": "warnings_regex"] \ so runners that do not support the [warnings_regex] section can skip the test at line [%d]\ """, lineNumber))); } @@ -520,11 +601,11 @@ public void testAddingDoWithAllowedWarningWithoutSkipAllowedWarnings() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setAllowedWarningHeaders(singletonList("foo")); doSection.setApiCallSection(new ApiCallSection("test")); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings] section without a corresponding ["skip": "features": \ + attempted to add a [do] with a [allowed_warnings] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings"] so runners that do not support the [allowed_warnings] section can skip the test at \ line [%d]\ """, lineNumber))); @@ -535,11 +616,11 @@ public void testAddingDoWithAllowedWarningRegexWithoutSkipAllowedWarnings() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setAllowedWarningHeadersRegex(singletonList(Pattern.compile("foo"))); doSection.setApiCallSection(new ApiCallSection("test")); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [allowed_warnings_regex] section without a corresponding ["skip": "features": \ + attempted to add 
a [do] with a [allowed_warnings_regex] section without a corresponding ["requires": "test_runner_features": \ "allowed_warnings_regex"] so runners that do not support the [allowed_warnings_regex] section can skip the test \ at line [%d]\ """, lineNumber))); @@ -551,11 +632,11 @@ public void testAddingDoWithHeaderWithoutSkipHeaders() { ApiCallSection apiCallSection = new ApiCallSection("test"); apiCallSection.addHeaders(Collections.singletonMap("header", "value")); doSection.setApiCallSection(apiCallSection); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [headers] section without a corresponding ["skip": "features": "headers"] \ + attempted to add a [do] with a [headers] section without a corresponding ["requires": "test_runner_features": "headers"] \ so runners that do not support the [headers] section can skip the test at line [%d]\ """, lineNumber))); } @@ -566,11 +647,12 @@ public void testAddingDoWithNodeSelectorWithoutSkipNodeSelector() { ApiCallSection apiCall = new ApiCallSection("test"); apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); doSection.setApiCallSection(apiCall); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, doSection); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, doSection); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] \ + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, lineNumber))); } @@ -582,11 +664,11 @@ public void testAddingContainsWithoutSkipContains() { randomAlphaOfLength(randomIntBetween(3, 30)), randomDouble() ); - ClientYamlTestSuite testSuite = createTestSuite(SkipSection.EMPTY, containsAssertion); + ClientYamlTestSuite testSuite = createTestSuite(PrerequisiteSection.EMPTY, containsAssertion); Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertThat(e.getMessage(), containsString(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] \ + attempted to add a [contains] assertion without a corresponding ["requires": "test_runner_features": "contains"] \ so runners that do not support the [contains] assertion can skip the test at line [%d]\ """, lineNumber))); } @@ -604,7 +686,7 @@ public void testMultipleValidationErrors() { new ClientYamlTestSection( new XContentLocation(0, 0), "section1", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, Collections.singletonList(containsAssertion) ) ); @@ -625,7 +707,7 @@ public void testMultipleValidationErrors() { doSection.setApiCallSection(apiCall); doSections.add(doSection); } - sections.add(new ClientYamlTestSection(new XContentLocation(0, 0), "section2", SkipSection.EMPTY, doSections)); + sections.add(new ClientYamlTestSection(new XContentLocation(0, 0), "section2", PrerequisiteSection.EMPTY, doSections)); ClientYamlTestSuite testSuite = 
new ClientYamlTestSuite( "api", @@ -638,23 +720,29 @@ public void testMultipleValidationErrors() { Exception e = expectThrows(IllegalArgumentException.class, testSuite::validate); assertEquals(Strings.format(""" api/name: - attempted to add a [contains] assertion without a corresponding ["skip": "features": "contains"] so runners that \ - do not support the [contains] assertion can skip the test at line [%d], - attempted to add a [do] with a [warnings] section without a corresponding ["skip": "features": "warnings"] so runners \ - that do not support the [warnings] section can skip the test at line [%d], - attempted to add a [do] with a [node_selector] section without a corresponding ["skip": "features": "node_selector"] so \ - runners that do not support the [node_selector] section can skip the test \ - at line [%d]\ + attempted to add a [contains] assertion without a corresponding \ + ["requires": "test_runner_features": "contains"] \ + so runners that do not support the [contains] assertion can skip the test at line [%d], + attempted to add a [do] with a [warnings] section without a corresponding \ + ["requires": "test_runner_features": "warnings"] \ + so runners that do not support the [warnings] section can skip the test at line [%d], + attempted to add a [do] with a [node_selector] section without a corresponding \ + ["requires": "test_runner_features": "node_selector"] \ + so runners that do not support the [node_selector] section can skip the test at line [%d]\ """, firstLineNumber, secondLineNumber, thirdLineNumber), e.getMessage()); } + private static PrerequisiteSection createPrerequisiteSection(String yamlTestRunnerFeature) { + return new PrerequisiteSection(emptyList(), null, emptyList(), null, singletonList(yamlTestRunnerFeature)); + } + public void testAddingDoWithWarningWithSkip() { int lineNumber = between(1, 10000); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeaders(singletonList("foo")); doSection.setApiCallSection(new ApiCallSection("test")); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("warnings"), null); - createTestSuite(skipSection, doSection).validate(); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("warnings"); + createTestSuite(prerequisiteSection, doSection).validate(); } public void testAddingDoWithWarningRegexWithSkip() { @@ -662,86 +750,86 @@ public void testAddingDoWithWarningRegexWithSkip() { DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); doSection.setExpectedWarningHeadersRegex(singletonList(Pattern.compile("foo"))); doSection.setApiCallSection(new ApiCallSection("test")); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("warnings_regex"), null); - createTestSuite(skipSection, doSection).validate(); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("warnings_regex"); + createTestSuite(prerequisiteSection, doSection).validate(); } public void testAddingDoWithNodeSelectorWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("node_selector"), null); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("node_selector"); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCall = new ApiCallSection("test"); apiCall.setNodeSelector(NodeSelector.SKIP_DEDICATED_MASTERS); doSection.setApiCallSection(apiCall); - createTestSuite(skipSection, doSection).validate(); + 
createTestSuite(prerequisiteSection, doSection).validate(); } public void testAddingDoWithHeadersWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("headers"), null); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("headers"); DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0)); ApiCallSection apiCallSection = new ApiCallSection("test"); apiCallSection.addHeaders(singletonMap("foo", "bar")); doSection.setApiCallSection(apiCallSection); - createTestSuite(skipSection, doSection).validate(); + createTestSuite(prerequisiteSection, doSection).validate(); } public void testAddingContainsWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("contains"), null); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("contains"); ContainsAssertion containsAssertion = new ContainsAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), randomDouble() ); - createTestSuite(skipSection, containsAssertion).validate(); + createTestSuite(prerequisiteSection, containsAssertion).validate(); } public void testAddingCloseToWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("close_to"), null); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("close_to"); CloseToAssertion closeToAssertion = new CloseToAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), randomDouble(), randomDouble() ); - createTestSuite(skipSection, closeToAssertion).validate(); + createTestSuite(prerequisiteSection, closeToAssertion).validate(); } public void testAddingIsAfterWithSkip() { int lineNumber = between(1, 10000); - SkipSection skipSection = new SkipSection(emptyList(), singletonList("is_after"), null); + PrerequisiteSection prerequisiteSection = createPrerequisiteSection("is_after"); IsAfterAssertion isAfterAssertion = new IsAfterAssertion( new XContentLocation(lineNumber, 0), randomAlphaOfLength(randomIntBetween(3, 30)), randomInstantBetween(Instant.ofEpochSecond(0L), Instant.ofEpochSecond(3000000000L)) ); - createTestSuite(skipSection, isAfterAssertion).validate(); + createTestSuite(prerequisiteSection, isAfterAssertion).validate(); } - private static ClientYamlTestSuite createTestSuite(SkipSection skipSection, ExecutableSection executableSection) { + private static ClientYamlTestSuite createTestSuite(PrerequisiteSection prerequisiteSection, ExecutableSection executableSection) { final SetupSection setupSection; final TeardownSection teardownSection; final ClientYamlTestSection clientYamlTestSection; switch (randomIntBetween(0, 4)) { case 0 -> { - setupSection = new SetupSection(skipSection, Collections.emptyList()); + setupSection = new SetupSection(prerequisiteSection, Collections.emptyList()); teardownSection = TeardownSection.EMPTY; clientYamlTestSection = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, Collections.singletonList(executableSection) ); } case 1 -> { setupSection = SetupSection.EMPTY; - teardownSection = new TeardownSection(skipSection, Collections.emptyList()); + teardownSection = new TeardownSection(prerequisiteSection, Collections.emptyList()); clientYamlTestSection = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, 
Collections.singletonList(executableSection) ); } @@ -751,27 +839,27 @@ private static ClientYamlTestSuite createTestSuite(SkipSection skipSection, Exec clientYamlTestSection = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - skipSection, + prerequisiteSection, Collections.singletonList(executableSection) ); } case 3 -> { - setupSection = new SetupSection(skipSection, Collections.singletonList(executableSection)); + setupSection = new SetupSection(prerequisiteSection, Collections.singletonList(executableSection)); teardownSection = TeardownSection.EMPTY; clientYamlTestSection = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, randomBoolean() ? Collections.emptyList() : Collections.singletonList(executableSection) ); } case 4 -> { setupSection = SetupSection.EMPTY; - teardownSection = new TeardownSection(skipSection, Collections.singletonList(executableSection)); + teardownSection = new TeardownSection(prerequisiteSection, Collections.singletonList(executableSection)); clientYamlTestSection = new ClientYamlTestSection( new XContentLocation(0, 0), "test", - SkipSection.EMPTY, + PrerequisiteSection.EMPTY, randomBoolean() ? Collections.emptyList() : Collections.singletonList(executableSection) ); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java new file mode 100644 index 0000000000000..181ec34fefb7e --- /dev/null +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/PrerequisiteSectionTests.java @@ -0,0 +1,630 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.yaml.YamlXContent; +import org.junit.AssumptionViolatedException; + +import java.io.IOException; +import java.util.List; +import java.util.Set; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class PrerequisiteSectionTests extends AbstractClientYamlTestFragmentParserTestCase { + + public void testSkipVersionMultiRange() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnVersionRange("6.0.0 - 6.1.0, 7.1.0 - 7.5.0")), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) + .thenReturn(Set.of("6.2.0")) + .thenReturn(Set.of("7.0.0")) + .thenReturn(Set.of("7.6.0")); + + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + + var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("6.0.0")) + .thenReturn(Set.of("6.1.0")) + .thenReturn(Set.of("7.1.0")) + .thenReturn(Set.of("7.5.0")); + + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + } + + public void testSkipVersionMultiOpenRange() { + var section = new PrerequisiteSection( + List.of(Prerequisites.skipOnVersionRange("- 7.1.0, 7.2.0 - 7.5.0, 8.0.0 -")), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of("7.1.1")).thenReturn(Set.of("7.6.0")); + + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + assertFalse(section.skipCriteriaMet(outOfRangeMockContext)); + + var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); + when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("7.0.0")) + .thenReturn(Set.of("7.3.0")) + .thenReturn(Set.of("8.0.0")) + .thenReturn(Set.of(Version.CURRENT.toString())); + + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + assertTrue(section.skipCriteriaMet(inRangeMockContext)); + 
assertTrue(section.skipCriteriaMet(inRangeMockContext)); + } + + public void testSkipVersion() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnVersionRange("6.0.0 - 6.1.0")), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) + .thenReturn(Set.of("6.0.0")) + .thenReturn(Set.of("6.0.0", "6.1.0")) + .thenReturn(Set.of("6.0.0", "5.2.0")); + + assertFalse(section.skipCriteriaMet(mockContext)); + assertTrue(section.skipCriteriaMet(mockContext)); + assertTrue(section.skipCriteriaMet(mockContext)); + assertFalse(section.skipCriteriaMet(mockContext)); + } + + public void testSkipVersionWithTestFeatures() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnVersionRange("6.0.0 - 6.1.0")), + "foobar", + emptyList(), + "foobar", + singletonList("warnings") + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())).thenReturn(Set.of("6.0.0")); + + assertFalse(section.skipCriteriaMet(mockContext)); + assertTrue(section.skipCriteriaMet(mockContext)); + } + + public void testSkipTestFeatures() { + var section = new PrerequisiteSection.PrerequisiteSectionBuilder().requireYamlRunnerFeature("boom").build(); + assertFalse(section.requiresCriteriaMet(mock(ClientYamlTestExecutionContext.class))); + } + + public void testSkipTestFeaturesOverridesAnySkipCriteria() { + var section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfOs("test-os").requireYamlRunnerFeature("boom").build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.os()).thenReturn("test-os"); + + // Skip even if OS is matching + assertFalse(section.skipCriteriaMet(mockContext)); + assertFalse(section.requiresCriteriaMet(mockContext)); + } + + public void testSkipOs() { + PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().skipIfOs("windows95") + .skipIfOs("debian-5") + .build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + + when(mockContext.os()).thenReturn("debian-5"); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.os()).thenReturn("windows95"); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.os()).thenReturn("ms-dos"); + assertFalse(section.skipCriteriaMet(mockContext)); + + assertTrue(section.requiresCriteriaMet(mockContext)); + } + + public void testSkipOsWithTestFeatures() { + PrerequisiteSection section = new PrerequisiteSection.PrerequisiteSectionBuilder().requireYamlRunnerFeature("warnings") + .skipIfOs("windows95") + .skipIfOs("debian-5") + .build(); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.os()).thenReturn("debian-5"); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.os()).thenReturn("windows95"); + assertTrue(section.skipCriteriaMet(mockContext)); + + when(mockContext.os()).thenReturn("ms-dos"); + assertFalse(section.skipCriteriaMet(mockContext)); + } + + public void testBuildMessage() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnVersionRange("6.0.0 - 6.1.0")), + "unsupported", + emptyList(), + "required", + singletonList("warnings") + ); + assertEquals("[FOOBAR] skipped, reason: [unsupported] unsupported features [warnings]", 
section.buildMessage("FOOBAR", true)); + assertEquals("[FOOBAR] skipped, reason: [required] unsupported features [warnings]", section.buildMessage("FOOBAR", false)); + section = new PrerequisiteSection(emptyList(), "unsupported", emptyList(), "required", emptyList()); + assertEquals("[FOOBAR] skipped, reason: [unsupported]", section.buildMessage("FOOBAR", true)); + assertEquals("[FOOBAR] skipped, reason: [required]", section.buildMessage("FOOBAR", false)); + section = new PrerequisiteSection(emptyList(), null, emptyList(), null, singletonList("warnings")); + assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.buildMessage("FOOBAR", true)); + assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.buildMessage("FOOBAR", false)); + } + + public void testParseNoPrerequisites() throws IOException { + parser = createParser(YamlXContent.yamlXContent, """ + do: + something + """); + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + + var skipSection = skipSectionBuilder.build(); + assertThat(skipSection.isEmpty(), equalTo(true)); + + // Ensure the input (bogus execute section) was not consumed + var next = ParserUtils.parseField(parser); + assertThat(next, notNullValue()); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseSkipSectionVersionNoFeature() throws Exception { + Version version = VersionUtils.randomVersion(random()); + parser = createParser(YamlXContent.yamlXContent, Strings.format(""" + version: " - %s" + reason: Delete ignores the parent param""", version)); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, not(emptyOrNullString())); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures.size(), equalTo(0)); + assertThat(skipSectionBuilder.skipReason, equalTo("Delete ignores the parent param")); + } + + public void testParseSkipSectionFeatureNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, "features: regex"); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, contains("regex")); + assertThat(skipSectionBuilder.skipReason, nullValue()); + assertThat(skipSectionBuilder.xpackRequired, is(PrerequisiteSection.PrerequisiteSectionBuilder.XPackRequired.NOT_SPECIFIED)); + } + + public void testParseXPackFeature() throws IOException { + parser = createParser(YamlXContent.yamlXContent, "features: xpack"); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, empty()); + assertThat(skipSectionBuilder.skipReason, nullValue()); + assertThat(skipSectionBuilder.xpackRequired, is(PrerequisiteSection.PrerequisiteSectionBuilder.XPackRequired.YES)); + } + + public void testParseNoXPackFeature() throws IOException { + parser = createParser(YamlXContent.yamlXContent, "features: no_xpack"); 
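+        // "xpack" / "no_xpack" are not test runner features: they record whether the cluster under
+        // test must (or must not) have X-Pack installed, surfaced on the builder as xpackRequired.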
+ + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, empty()); + assertThat(skipSectionBuilder.skipReason, nullValue()); + assertThat(skipSectionBuilder.xpackRequired, is(PrerequisiteSection.PrerequisiteSectionBuilder.XPackRequired.NO)); + } + + public void testParseBothXPackFeatures() throws IOException { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + features: [xpack, no_xpack] + """); + + var e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), containsString("either [xpack] or [no_xpack] can be present, not both")); + } + + public void testParseSkipSectionFeaturesNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, "features: [regex1,regex2,regex3]"); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, contains("regex1", "regex2", "regex3")); + assertThat(skipSectionBuilder.skipReason, nullValue()); + } + + public void testParseSkipSectionBothFeatureAndVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + version: " - 0.90.2" + features: regex + reason: Delete ignores the parent param"""); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder.skipVersionRange, not(emptyOrNullString())); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, contains("regex")); + assertThat(skipSectionBuilder.skipReason, equalTo("Delete ignores the parent param")); + } + + public void testParseSkipSectionNoReason() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + version: " - 0.90.2" + """); + + Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), is("reason is mandatory within skip version section")); + } + + public void testParseSkipSectionNoVersionNorFeature() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + reason: Delete ignores the parent param + """); + + Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat( + e.getMessage(), + is("at least one criteria (version, cluster features, runner features, os) is mandatory within a skip section") + ); + } + + public void testParseSkipSectionOsNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + features: ["skip_os", "some_feature"] + os: debian-9 + reason: memory accounting broken, see gh#xyz + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, hasSize(2)); + assertThat(skipSectionBuilder.skipOperatingSystems, 
contains("debian-9")); + assertThat(skipSectionBuilder.skipReason, is("memory accounting broken, see gh#xyz")); + } + + public void testParseSkipSectionOsListNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + features: skip_os + os: [debian-9,windows-95,ms-dos] + reason: see gh#xyz + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, hasSize(1)); + assertThat(skipSectionBuilder.skipOperatingSystems, containsInAnyOrder("debian-9", "windows-95", "ms-dos")); + assertThat(skipSectionBuilder.skipReason, is("see gh#xyz")); + } + + public void testParseSkipSectionOsListTestFeaturesInRequires() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + test_runner_features: skip_os + reason: skip_os is needed for skip based on os + - skip: + os: [debian-9,windows-95,ms-dos] + reason: see gh#xyz + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredYamlRunnerFeatures, hasSize(1)); + assertThat(skipSectionBuilder.skipOperatingSystems, containsInAnyOrder("debian-9", "windows-95", "ms-dos")); + assertThat(skipSectionBuilder.skipReason, is("see gh#xyz")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + skip: + os: debian-9 + reason: memory accounting broken, see gh#xyz + """); + + Exception e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), is("if os is specified, test runner feature [skip_os] must be set")); + } + + public void testParseRequireSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: needed-feature + reason: test skipped when cluster lacks needed-feature + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseRequiresSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.requiresReason, is("test skipped when cluster lacks needed-feature")); + } + + public void testParseSkipSectionClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + cluster_features: undesired-feature + reason: test skipped when undesired-feature is present + """); + + var skipSectionBuilder = new PrerequisiteSection.PrerequisiteSectionBuilder(); + PrerequisiteSection.parseSkipSection(parser, skipSectionBuilder); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.skipReason, is("test skipped when undesired-feature is present")); + } + + 
public void testParseRequireAndSkipSectionsClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: needed-feature + reason: test needs needed-feature to run + - skip: + cluster_features: undesired-feature + reason: test cannot run when undesired-feature are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, contains("undesired-feature")); + assertThat(skipSectionBuilder.requiredClusterFeatures, contains("needed-feature")); + assertThat(skipSectionBuilder.skipReason, is("test cannot run when undesired-feature are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs needed-feature to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseRequireAndSkipSectionMultipleClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: [needed-feature-1, needed-feature-2] + reason: test needs some to run + - skip: + cluster_features: [undesired-feature-1, undesired-feature-2] + reason: test cannot run when some are present + """); + + var skipSectionBuilder = PrerequisiteSection.parseInternal(parser); + assertThat(skipSectionBuilder, notNullValue()); + assertThat(skipSectionBuilder.skipVersionRange, emptyOrNullString()); + assertThat(skipSectionBuilder.skipClusterFeatures, containsInAnyOrder("undesired-feature-1", "undesired-feature-2")); + assertThat(skipSectionBuilder.requiredClusterFeatures, containsInAnyOrder("needed-feature-1", "needed-feature-2")); + assertThat(skipSectionBuilder.skipReason, is("test cannot run when some are present")); + assertThat(skipSectionBuilder.requiresReason, is("test needs some to run")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testParseSameRequireAndSkipClusterFeatures() throws Exception { + parser = createParser(YamlXContent.yamlXContent, """ + - requires: + cluster_features: some-feature + reason: test needs some-feature to run + - skip: + cluster_features: some-feature + reason: test cannot run with some-feature + """); + + var e = expectThrows(ParsingException.class, () -> PrerequisiteSection.parseInternal(parser)); + assertThat(e.getMessage(), is("a cluster feature can be specified either in [requires] or [skip], not both")); + + assertThat(parser.currentToken(), equalTo(XContentParser.Token.END_ARRAY)); + assertThat(parser.nextToken(), nullValue()); + } + + public void testSkipClusterFeaturesAllRequiredMatch() { + PrerequisiteSection section = new PrerequisiteSection( + emptyList(), + "foobar", + List.of(Prerequisites.requireClusterFeatures(Set.of("required-feature-1", "required-feature-2"))), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.clusterHasFeature("required-feature-1")).thenReturn(true); + when(mockContext.clusterHasFeature("required-feature-2")).thenReturn(true); + + assertFalse(section.skipCriteriaMet(mockContext)); + assertTrue(section.requiresCriteriaMet(mockContext)); + } + + public void testSkipClusterFeaturesSomeRequiredMatch() { + PrerequisiteSection section = new PrerequisiteSection( + emptyList(), + 
"foobar", + List.of(Prerequisites.requireClusterFeatures(Set.of("required-feature-1", "required-feature-2"))), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.clusterHasFeature("required-feature-1")).thenReturn(true); + when(mockContext.clusterHasFeature("required-feature-2")).thenReturn(false); + + assertFalse(section.skipCriteriaMet(mockContext)); + assertFalse(section.requiresCriteriaMet(mockContext)); + } + + public void testSkipClusterFeaturesSomeToSkipMatch() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnClusterFeatures(Set.of("undesired-feature-1", "undesired-feature-2"))), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.clusterHasFeature("undesired-feature-1")).thenReturn(true); + + assertTrue(section.skipCriteriaMet(mockContext)); + } + + public void testSkipClusterFeaturesNoneToSkipMatch() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnClusterFeatures(Set.of("undesired-feature-1", "undesired-feature-2"))), + "foobar", + emptyList(), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + assertFalse(section.skipCriteriaMet(mockContext)); + } + + public void testSkipClusterFeaturesAllRequiredSomeToSkipMatch() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnClusterFeatures(Set.of("undesired-feature-1", "undesired-feature-2"))), + "foobar", + List.of(Prerequisites.requireClusterFeatures(Set.of("required-feature-1", "required-feature-2"))), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.clusterHasFeature("required-feature-1")).thenReturn(true); + when(mockContext.clusterHasFeature("required-feature-2")).thenReturn(true); + when(mockContext.clusterHasFeature("undesired-feature-1")).thenReturn(true); + + assertTrue(section.skipCriteriaMet(mockContext)); + assertTrue(section.requiresCriteriaMet(mockContext)); + } + + public void testSkipClusterFeaturesAllRequiredNoneToSkipMatch() { + PrerequisiteSection section = new PrerequisiteSection( + List.of(Prerequisites.skipOnClusterFeatures(Set.of("undesired-feature-1", "undesired-feature-2"))), + "foobar", + List.of(Prerequisites.requireClusterFeatures(Set.of("required-feature-1", "required-feature-2"))), + "foobar", + emptyList() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + when(mockContext.clusterHasFeature("required-feature-1")).thenReturn(true); + when(mockContext.clusterHasFeature("required-feature-2")).thenReturn(true); + + assertFalse(section.skipCriteriaMet(mockContext)); + assertTrue(section.requiresCriteriaMet(mockContext)); + } + + public void evaluateEmpty() { + var section = new PrerequisiteSection(List.of(), "unsupported", List.of(), "required", List.of()); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + section.evaluate(mockContext, "TEST"); + } + + public void evaluateRequiresCriteriaTrue() { + var section = new PrerequisiteSection(List.of(), "unsupported", List.of(Prerequisites.TRUE), "required", List.of()); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + section.evaluate(mockContext, "TEST"); + } + + public void evaluateSkipCriteriaFalse() { + var section = new PrerequisiteSection(List.of(Prerequisites.FALSE), "unsupported", List.of(), "required", List.of()); + + 
var mockContext = mock(ClientYamlTestExecutionContext.class); + section.evaluate(mockContext, "TEST"); + } + + public void evaluateRequiresCriteriaFalse() { + var section = new PrerequisiteSection( + List.of(Prerequisites.FALSE), + "unsupported", + List.of(Prerequisites.FALSE), + "required", + List.of() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + var e = expectThrows(AssumptionViolatedException.class, () -> section.evaluate(mockContext, "TEST")); + assertThat(e.getMessage(), equalTo("[TEST] skipped, reason: [required]")); + } + + public void evaluateSkipCriteriaTrue() { + var section = new PrerequisiteSection( + List.of(Prerequisites.TRUE), + "unsupported", + List.of(Prerequisites.TRUE), + "required", + List.of() + ); + + var mockContext = mock(ClientYamlTestExecutionContext.class); + var e = expectThrows(AssumptionViolatedException.class, () -> section.evaluate(mockContext, "TEST")); + assertThat(e.getMessage(), equalTo("[TEST] skipped, reason: [unsupported]")); + } +} diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index 53aaf99d7e272..78c31c85178a6 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -37,7 +37,7 @@ public void testParseSetupSection() throws Exception { SetupSection setupSection = SetupSection.parse(parser); assertThat(setupSection, notNullValue()); - assertThat(setupSection.getSkipSection().isEmpty(), equalTo(true)); + assertThat(setupSection.getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); assertThat(((DoSection) setupSection.getExecutableSections().get(0)).getApiCallSection().getApi(), equalTo("index1")); @@ -60,7 +60,7 @@ public void testParseSetSectionInSetupSection() throws IOException { final SetupSection setupSection = SetupSection.parse(parser); assertNotNull(setupSection); - assertTrue(setupSection.getSkipSection().isEmpty()); + assertTrue(setupSection.getPrerequisiteSection().isEmpty()); assertThat(setupSection.getExecutableSections().size(), equalTo(5)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); assertThat(((DoSection) setupSection.getExecutableSections().get(0)).getApiCallSection().getApi(), equalTo("cluster.state")); @@ -105,9 +105,9 @@ public void testParseSetupAndSkipSectionNoSkip() throws Exception { SetupSection setupSection = SetupSection.parse(parser); assertThat(setupSection, notNullValue()); - assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); - assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); + assertThat(setupSection.getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat(setupSection.getPrerequisiteSection(), notNullValue()); + assertThat(setupSection.getPrerequisiteSection().skipReason, equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getExecutableSections().size(), equalTo(2)); assertThat(setupSection.getExecutableSections().get(0), instanceOf(DoSection.class)); 
assertThat(((DoSection) setupSection.getExecutableSections().get(0)).getApiCallSection().getApi(), equalTo("index1")); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java deleted file mode 100644 index bd1f8fa758499..0000000000000 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.test.rest.yaml.section; - -import org.elasticsearch.Version; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.core.Strings; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; -import org.elasticsearch.xcontent.yaml.YamlXContent; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Set; - -import static org.hamcrest.Matchers.contains; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.emptyOrNullString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { - - public void testSkipVersionMultiRange() { - SkipSection section = new SkipSection( - List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0, 7.1.0 - 7.5.0")), - Collections.emptyList(), - "foobar" - ); - - var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); - when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) - .thenReturn(Set.of("6.2.0")) - .thenReturn(Set.of("7.0.0")) - .thenReturn(Set.of("7.6.0")); - - assertFalse(section.skip(outOfRangeMockContext)); - assertFalse(section.skip(outOfRangeMockContext)); - assertFalse(section.skip(outOfRangeMockContext)); - assertFalse(section.skip(outOfRangeMockContext)); - - var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); - when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("6.0.0")) - .thenReturn(Set.of("6.1.0")) - .thenReturn(Set.of("7.1.0")) - .thenReturn(Set.of("7.5.0")); - - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - } - - public void testSkipVersionMultiOpenRange() { - var section = new SkipSection( - List.of(SkipCriteria.fromVersionRange("- 7.1.0, 7.2.0 - 7.5.0, 8.0.0 -")), - Collections.emptyList(), - "foobar" - ); - - var outOfRangeMockContext = mock(ClientYamlTestExecutionContext.class); - when(outOfRangeMockContext.nodesVersions()).thenReturn(Set.of("7.1.1")).thenReturn(Set.of("7.6.0")); - - 
assertFalse(section.skip(outOfRangeMockContext)); - assertFalse(section.skip(outOfRangeMockContext)); - - var inRangeMockContext = mock(ClientYamlTestExecutionContext.class); - when(inRangeMockContext.nodesVersions()).thenReturn(Set.of("7.0.0")) - .thenReturn(Set.of("7.3.0")) - .thenReturn(Set.of("8.0.0")) - .thenReturn(Set.of(Version.CURRENT.toString())); - - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - assertTrue(section.skip(inRangeMockContext)); - } - - public void testSkipVersion() { - SkipSection section = new SkipSection(List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), Collections.emptyList(), "foobar"); - - var mockContext = mock(ClientYamlTestExecutionContext.class); - when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())) - .thenReturn(Set.of("6.0.0")) - .thenReturn(Set.of("6.0.0", "6.1.0")) - .thenReturn(Set.of("6.0.0", "5.2.0")); - - assertFalse(section.skip(mockContext)); - assertTrue(section.skip(mockContext)); - assertTrue(section.skip(mockContext)); - assertFalse(section.skip(mockContext)); - } - - public void testSkipVersionWithTestFeatures() { - SkipSection section = new SkipSection( - List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), - Collections.singletonList("warnings"), - "foobar" - ); - - var mockContext = mock(ClientYamlTestExecutionContext.class); - when(mockContext.nodesVersions()).thenReturn(Set.of(Version.CURRENT.toString())).thenReturn(Set.of("6.0.0")); - - assertFalse(section.skip(mockContext)); - assertTrue(section.skip(mockContext)); - } - - public void testSkipTestFeatures() { - var section = new SkipSection.SkipSectionBuilder().withTestFeature("boom").build(); - assertTrue(section.skip(mock(ClientYamlTestExecutionContext.class))); - } - - public void testSkipTestFeaturesOverridesAnySkipCriteria() { - var section = new SkipSection.SkipSectionBuilder().withOs("test-os").withTestFeature("boom").build(); - - var mockContext = mock(ClientYamlTestExecutionContext.class); - when(mockContext.os()).thenReturn("test-os"); - - // Skip even if OS is matching - assertTrue(section.skip(mockContext)); - } - - public void testSkipOs() { - SkipSection section = new SkipSection.SkipSectionBuilder().withOs("windows95").withOs("debian-5").build(); - - var mockContext = mock(ClientYamlTestExecutionContext.class); - - when(mockContext.os()).thenReturn("debian-5"); - assertTrue(section.skip(mockContext)); - - when(mockContext.os()).thenReturn("windows95"); - assertTrue(section.skip(mockContext)); - - when(mockContext.os()).thenReturn("ms-dos"); - assertFalse(section.skip(mockContext)); - } - - public void testSkipOsWithTestFeatures() { - SkipSection section = new SkipSection.SkipSectionBuilder().withTestFeature("warnings") - .withOs("windows95") - .withOs("debian-5") - .build(); - - var mockContext = mock(ClientYamlTestExecutionContext.class); - when(mockContext.os()).thenReturn("debian-5"); - assertTrue(section.skip(mockContext)); - - when(mockContext.os()).thenReturn("windows95"); - assertTrue(section.skip(mockContext)); - - when(mockContext.os()).thenReturn("ms-dos"); - assertFalse(section.skip(mockContext)); - } - - public void testMessage() { - SkipSection section = new SkipSection( - List.of(SkipCriteria.fromVersionRange("6.0.0 - 6.1.0")), - Collections.singletonList("warnings"), - "foobar" - ); - assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); - section = new 
SkipSection(List.of(), Collections.singletonList("warnings"), "foobar"); - assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); - section = new SkipSection(List.of(), Collections.singletonList("warnings"), null); - assertEquals("[FOOBAR] skipped, unsupported features [warnings]", section.getSkipMessage("FOOBAR")); - } - - public void testParseSkipSectionVersionNoFeature() throws Exception { - Version version = VersionUtils.randomVersion(random()); - parser = createParser(YamlXContent.yamlXContent, Strings.format(""" - version: " - %s" - reason: Delete ignores the parent param""", version)); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, not(emptyOrNullString())); - assertThat(skipSectionBuilder.testFeatures.size(), equalTo(0)); - assertThat(skipSectionBuilder.reason, equalTo("Delete ignores the parent param")); - } - - public void testParseSkipSectionFeatureNoVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, "features: regex"); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, contains("regex")); - assertThat(skipSectionBuilder.reason, nullValue()); - assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.NOT_SPECIFIED)); - } - - public void testParseXPackFeature() throws IOException { - parser = createParser(YamlXContent.yamlXContent, "features: xpack"); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, empty()); - assertThat(skipSectionBuilder.reason, nullValue()); - assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.YES)); - } - - public void testParseNoXPackFeature() throws IOException { - parser = createParser(YamlXContent.yamlXContent, "features: no_xpack"); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, empty()); - assertThat(skipSectionBuilder.reason, nullValue()); - assertThat(skipSectionBuilder.xpackRequested, is(SkipSection.SkipSectionBuilder.XPackRequested.NO)); - } - - public void testParseBothXPackFeatures() throws IOException { - parser = createParser(YamlXContent.yamlXContent, "features: [xpack, no_xpack]"); - - var e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); - assertThat(e.getMessage(), containsString("either `xpack` or `no_xpack` can be present, not both")); - } - - public void testParseSkipSectionFeaturesNoVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, "features: [regex1,regex2,regex3]"); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, contains("regex1", "regex2", "regex3")); - assertThat(skipSectionBuilder.reason, nullValue()); - } - - public void testParseSkipSectionBothFeatureAndVersion() throws Exception { - parser = 
createParser(YamlXContent.yamlXContent, """ - version: " - 0.90.2" - features: regex - reason: Delete ignores the parent param"""); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder.version, not(emptyOrNullString())); - assertThat(skipSectionBuilder.testFeatures, contains("regex")); - assertThat(skipSectionBuilder.reason, equalTo("Delete ignores the parent param")); - } - - public void testParseSkipSectionNoReason() throws Exception { - parser = createParser(YamlXContent.yamlXContent, "version: \" - 0.90.2\"\n"); - - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); - assertThat(e.getMessage(), is("reason is mandatory within skip version section")); - } - - public void testParseSkipSectionNoVersionNorFeature() throws Exception { - parser = createParser(YamlXContent.yamlXContent, "reason: Delete ignores the parent param\n"); - - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); - assertThat(e.getMessage(), is("at least one criteria (version, test features, os) is mandatory within a skip section")); - } - - public void testParseSkipSectionOsNoVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, """ - features: ["skip_os", "some_feature"] - os: debian-9 - reason: memory accounting broken, see gh#xyz - """); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, hasSize(2)); - assertThat(skipSectionBuilder.operatingSystems, contains("debian-9")); - assertThat(skipSectionBuilder.reason, is("memory accounting broken, see gh#xyz")); - } - - public void testParseSkipSectionOsListNoVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, """ - features: skip_os - os: [debian-9,windows-95,ms-dos] - reason: see gh#xyz - """); - - var skipSectionBuilder = SkipSection.parseInternal(parser); - assertThat(skipSectionBuilder, notNullValue()); - assertThat(skipSectionBuilder.version, emptyOrNullString()); - assertThat(skipSectionBuilder.testFeatures, hasSize(1)); - assertThat(skipSectionBuilder.operatingSystems, containsInAnyOrder("debian-9", "windows-95", "ms-dos")); - assertThat(skipSectionBuilder.reason, is("see gh#xyz")); - } - - public void testParseSkipSectionOsNoFeatureNoVersion() throws Exception { - parser = createParser(YamlXContent.yamlXContent, """ - os: debian-9 - reason: memory accounting broken, see gh#xyz - """); - - Exception e = expectThrows(ParsingException.class, () -> SkipSection.parseInternal(parser)); - assertThat(e.getMessage(), is("if os is specified, feature skip_os must be set")); - } -} diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index 2c6b4f5be12de..9844b90eb2148 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -35,7 +35,7 @@ public void testParseTeardownSection() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); - assertThat(section.getSkipSection().isEmpty(), equalTo(true)); + 
assertThat(section.getPrerequisiteSection().isEmpty(), equalTo(true)); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(((DoSection) section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); assertThat(((DoSection) section.getDoSections().get(1)).getApiCallSection().getApi(), equalTo("delete2")); @@ -62,8 +62,8 @@ public void testParseWithSkip() throws Exception { TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); - assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); + assertThat(section.getPrerequisiteSection().isEmpty(), equalTo(false)); + assertThat(section.getPrerequisiteSection().skipReason, equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(((DoSection) section.getDoSections().get(0)).getApiCallSection().getApi(), equalTo("delete")); assertThat(((DoSection) section.getDoSections().get(1)).getApiCallSection().getApi(), equalTo("delete2")); diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java index c2fa41a5241db..657f396b2857f 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncIT.java @@ -52,7 +52,7 @@ public static Iterable parameters(Function modifyExecutableSection(e, modify)).toList() ); result.add(new Object[] { new ClientYamlTestCandidate(candidate.getRestTestSuite(), modified) }); diff --git a/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java b/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java index 110a1fd24d0d3..badc04800e40f 100644 --- a/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java +++ b/x-pack/qa/runtime-fields/src/main/java/org/elasticsearch/xpack/runtimefields/test/CoreTestTranslater.java @@ -67,7 +67,7 @@ public Iterable parameters() throws Exception { ClientYamlTestSection modified = new ClientYamlTestSection( candidate.getTestSection().getLocation(), candidate.getTestSection().getName(), - candidate.getTestSection().getSkipSection(), + candidate.getTestSection().getPrerequisiteSection(), candidate.getTestSection().getExecutableSections() ); result.add(new Object[] { new ClientYamlTestCandidate(suite.modified, modified) }); @@ -169,7 +169,7 @@ public Suite(ClientYamlTestCandidate candidate) { candidate.getApi(), candidate.getName(), candidate.getRestTestSuite().getFile(), - new SetupSection(candidate.getSetupSection().getSkipSection(), setup), + new SetupSection(candidate.getSetupSection().getPrerequisiteSection(), setup), candidate.getTeardownSection(), List.of() ); From 6f2c036ce6241d39e99d484cb5f121475fa95c41 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 15 Jan 2024 14:12:20 +0000 Subject: [PATCH 28/95] [ML] Detect timeout when waiting for download task (#103197) A list tasks timeout indicates the task exists and is in progress. 
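To make that distinction concrete, here is a minimal sketch (illustrative only, not part of this change) of classifying the wait outcome from a list tasks response; it mirrors the status-based half of the check added to TaskRetriever below, and the helper name is assumed for illustration:

    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
    import org.elasticsearch.rest.RestStatus;

    // Sketch: an empty task list alone does not mean "no download task is running".
    // A node-level REQUEST_TIMEOUT failure means the task exists but the wait timed out.
    static boolean waitForDownloadTimedOut(ListTasksResponse response) {
        for (ElasticsearchException nodeFailure : response.getNodeFailures()) {
            if (nodeFailure.status() == RestStatus.REQUEST_TIMEOUT) {
                return true;
            }
        }
        return false;
    }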
Interpreting the timeout as the task not existing meant the download check would incorrectly assume the download had completed. --- .../TransportLoadTrainedModelPackage.java | 1 + .../TransportDeleteTrainedModelAction.java | 2 +- .../TransportPutTrainedModelAction.java | 23 ++++++--- ...portStartTrainedModelDeploymentAction.java | 38 ++++++++------ .../xpack/ml/utils/TaskRetriever.java | 51 ++++++++++++++++--- .../xpack/ml/utils/TaskRetrieverTests.java | 35 +++++++++++-- .../test/ml/3rd_party_deployment.yml | 7 ++- 7 files changed, 118 insertions(+), 39 deletions(-) diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index 2827255874224..ead7c836463fd 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -106,6 +106,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A } catch (Exception e) { taskManager.unregister(downloadTask); listener.onFailure(e); + return; } if (request.isWaitForCompletion() == false) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 7f2d0e47975e3..8ce41262a1e1d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -133,7 +133,7 @@ static void cancelDownloadTask(Client client, String modelId, ActionListener null, taskListener); } static Set getReferencedModelKeys(IngestMetadata ingestMetadata, IngestService ingestService) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java index c89b5005444b5..edbb9f297c8cd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java @@ -380,14 +380,21 @@ static void checkForExistingTask( ActionListener storeModelListener, TimeValue timeout ) { - TaskRetriever.getDownloadTaskInfo(client, modelId, isWaitForCompletion, ActionListener.wrap(taskInfo -> { - if (taskInfo != null) { - getModelInformation(client, modelId, sendResponseListener); - } else { - // no task exists so proceed with creating the model - storeModelListener.onResponse(null); - } - }, sendResponseListener::onFailure), timeout); + TaskRetriever.getDownloadTaskInfo( + client, + modelId, + isWaitForCompletion, + timeout, + () -> "Timed out waiting for model download to complete", + ActionListener.wrap(taskInfo -> { + if (taskInfo != null) { + getModelInformation(client, modelId, sendResponseListener); + } else { + // no task exists so proceed with creating the model + storeModelListener.onResponse(null); + } + }, sendResponseListener::onFailure) + ); } private static void getModelInformation(Client client, String 
modelId, ActionListener listener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 0a5641836df4a..2f2a76a1df1e2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -577,21 +577,29 @@ private static void step1CheckForDownloadTask( String modelId, ActionListener nextStepListener ) { - TaskRetriever.getDownloadTaskInfo(mlOriginClient, modelId, timeout != null, ActionListener.wrap(taskInfo -> { - if (taskInfo == null) { - nextStepListener.onResponse(null); - } else { - failOrRespondWith0( - () -> new ElasticsearchStatusException( - Messages.getMessage(Messages.MODEL_DOWNLOAD_IN_PROGRESS, modelId), - RestStatus.REQUEST_TIMEOUT - ), - errorIfDefinitionIsMissing, - modelId, - failureListener - ); - } - }, failureListener::onFailure), timeout); + // check task is present, do not wait for completion + TaskRetriever.getDownloadTaskInfo( + mlOriginClient, + modelId, + timeout != null, + timeout, + () -> Messages.getMessage(Messages.MODEL_DOWNLOAD_IN_PROGRESS, modelId), + ActionListener.wrap(taskInfo -> { + if (taskInfo == null) { + nextStepListener.onResponse(null); + } else { + failOrRespondWith0( + () -> new ElasticsearchStatusException( + Messages.getMessage(Messages.MODEL_DOWNLOAD_IN_PROGRESS, modelId), + RestStatus.REQUEST_TIMEOUT + ), + errorIfDefinitionIsMissing, + modelId, + failureListener + ); + } + }, failureListener::onFailure) + ); } private static void failOrRespondWith0( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TaskRetriever.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TaskRetriever.java index 652592bb08591..b60f57e5aaaf6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TaskRetriever.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/TaskRetriever.java @@ -7,20 +7,28 @@ package org.elasticsearch.xpack.ml.utils; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.transport.ReceiveTimeoutTransportException; import org.elasticsearch.xpack.core.ml.MlTasks; +import java.util.function.Supplier; + import static org.elasticsearch.xpack.core.ml.MlTasks.downloadModelTaskDescription; /** * Utility class for retrieving download tasks created by a PUT trained model API request. */ public class TaskRetriever { + /** * Returns a {@link TaskInfo} if one exists representing an in-progress trained model download. 
* @@ -28,16 +36,18 @@ public class TaskRetriever { * @param modelId the id of the model to check for an existing task * @param waitForCompletion a boolean flag determine if the request should wait for an existing task to complete before returning (aka * wait for the download to complete) + * @param timeout the timeout value in seconds that the request should fail if it does not complete + * @param errorMessageOnWaitTimeout Message to use if the request times out with {@code waitForCompletion == true} * @param listener a listener, if a task is found it is returned via {@code ActionListener.onResponse(taskInfo)}. * If a task is not found null is returned - * @param timeout the timeout value in seconds that the request should fail if it does not complete */ public static void getDownloadTaskInfo( Client client, String modelId, boolean waitForCompletion, - ActionListener listener, - TimeValue timeout + TimeValue timeout, + Supplier errorMessageOnWaitTimeout, + ActionListener listener ) { client.admin() .cluster() @@ -53,19 +63,46 @@ public static void getDownloadTaskInfo( if (tasks.size() > 0) { // there really shouldn't be more than a single task but if there is we'll just use the first one listener.onResponse(tasks.get(0)); + } else if (waitForCompletion && didItTimeout(response)) { + listener.onFailure(taskDidNotCompleteException(errorMessageOnWaitTimeout.get())); } else { + response.rethrowFailures("Checking model [" + modelId + "] download status"); listener.onResponse(null); } - }, - e -> listener.onFailure( + }, e -> { + listener.onFailure( new ElasticsearchStatusException( "Unable to retrieve task information for model id [{}]", RestStatus.INTERNAL_SERVER_ERROR, e, modelId ) - ) - )); + ); + })); + } + + private static boolean didItTimeout(ListTasksResponse response) { + if (response.getNodeFailures().isEmpty() == false) { + // if one node timed out then the others will also have timed out + var firstNodeFailure = response.getNodeFailures().get(0); + if (firstNodeFailure.status() == RestStatus.REQUEST_TIMEOUT) { + return true; + } + + var timeoutException = ExceptionsHelper.unwrap( + firstNodeFailure, + ElasticsearchTimeoutException.class, + ReceiveTimeoutTransportException.class + ); + if (timeoutException != null) { + return true; + } + } + return false; + } + + private static ElasticsearchException taskDidNotCompleteException(String message) { + return new ElasticsearchStatusException(message, RestStatus.REQUEST_TIMEOUT); } private TaskRetriever() {} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TaskRetrieverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TaskRetrieverTests.java index 719a9be43080f..6ee39266ba5fc 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TaskRetrieverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TaskRetrieverTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.utils; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -67,7 +68,7 @@ public void testGetExistingTaskInfoCallsOnFailureForAnError() { var listener = new PlainActionFuture(); - getDownloadTaskInfo(client, "modelId", false, listener, TIMEOUT); + getDownloadTaskInfo(client, "modelId", false, TIMEOUT, () -> 
"", listener); var exception = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); assertThat(exception.status(), is(RestStatus.INTERNAL_SERVER_ERROR)); @@ -78,7 +79,7 @@ public void testGetExistingTaskInfoCallsListenerWithNullWhenNoTasksExist() { var client = mockClientWithTasksResponse(Collections.emptyList(), threadPool); var listener = new PlainActionFuture(); - getDownloadTaskInfo(client, "modelId", false, listener, TIMEOUT); + getDownloadTaskInfo(client, "modelId", false, TIMEOUT, () -> "", listener); assertThat(listener.actionGet(TIMEOUT), nullValue()); } @@ -88,7 +89,7 @@ public void testGetExistingTaskInfoCallsListenerWithTaskInfoWhenTaskExists() { var client = mockClientWithTasksResponse(listTaskInfo, threadPool); var listener = new PlainActionFuture(); - getDownloadTaskInfo(client, "modelId", false, listener, TIMEOUT); + getDownloadTaskInfo(client, "modelId", false, TIMEOUT, () -> "", listener); assertThat(listener.actionGet(TIMEOUT), is(listTaskInfo.get(0))); } @@ -98,11 +99,37 @@ public void testGetExistingTaskInfoCallsListenerWithFirstTaskInfoWhenMultipleTas var client = mockClientWithTasksResponse(listTaskInfo, threadPool); var listener = new PlainActionFuture(); - getDownloadTaskInfo(client, "modelId", false, listener, TIMEOUT); + getDownloadTaskInfo(client, "modelId", false, TIMEOUT, () -> "", listener); assertThat(listener.actionGet(TIMEOUT), is(listTaskInfo.get(0))); } + public void testGetTimeoutOnWaitForCompletion() { + var client = mockListTasksClient(threadPool); + + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener actionListener = (ActionListener) invocationOnMock.getArguments()[2]; + actionListener.onResponse( + new ListTasksResponse( + List.of(), + List.of(), + List.of(new ElasticsearchStatusException("node timeout", RestStatus.REQUEST_TIMEOUT)) + ) + ); + + return Void.TYPE; + }).when(client).execute(same(TransportListTasksAction.TYPE), any(), any()); + + var listener = new PlainActionFuture(); + + getDownloadTaskInfo(client, "modelId", true, TIMEOUT, () -> "Testing timeout", listener); + + var exception = expectThrows(ElasticsearchException.class, () -> listener.actionGet(TIMEOUT)); + assertThat(exception.status(), is(RestStatus.REQUEST_TIMEOUT)); + assertThat(exception.getMessage(), is("Testing timeout")); + } + /** * A helper method for setting up a mock cluster client to return the passed in list of tasks. * diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index 818dad0114eef..fdccf473b358a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -218,10 +218,6 @@ setup: --- "Test start deployment fails while model download in progress": - - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/103153" - - do: ml.put_trained_model: model_id: .elser_model_2 @@ -231,10 +227,13 @@ setup: "field_names": ["text_field"] } } + # Set a low timeout so the test doesn't actually wait + # for the model download to complete - do: catch: /Model download task is currently running\. 
Wait for trained model \[.elser_model_2\] download task to complete then try again/ ml.start_trained_model_deployment: model_id: .elser_model_2 + timeout: 1s - do: ml.delete_trained_model: model_id: .elser_model_2 From 4717f9c5c12dd12c4bb542996bdd728b7d0e9a02 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 15 Jan 2024 15:36:33 +0100 Subject: [PATCH 29/95] Don't throw error for remote shards that open PIT filtered out (#104288) We recently introduced support for index_filter to the open point in time API (see #102388). Open point in time supports executing against remote indices, in which case it will open a reader context against the target remote shards. With support for index_filter, shards that cannot match the filter are not even included in the PIT id that open PIT returns. When a subsequent search that includes such a PIT id is executed, one search shards call is performed per cluster, returning all shards from the targeted indices, including those that open PIT has filtered out. In that case, we should just ignore those shards instead of throwing an exception when they are looked up in the search context id map built from the PIT id. Closes #102596 --- docs/changelog/104288.yaml | 6 + qa/ccs-common-rest/build.gradle | 3 +- .../test/search/350_point_in_time.yml | 4 + .../action/search/CCSPointInTimeIT.java | 142 +++++++++++++++--- .../TransportOpenPointInTimeAction.java | 3 + .../action/search/TransportSearchAction.java | 23 ++- 6 files changed, 161 insertions(+), 20 deletions(-) create mode 100644 docs/changelog/104288.yaml diff --git a/docs/changelog/104288.yaml b/docs/changelog/104288.yaml new file mode 100644 index 0000000000000..67f54e37cf9dc --- /dev/null +++ b/docs/changelog/104288.yaml @@ -0,0 +1,6 @@ +pr: 104288 +summary: Don't throw error for remote shards that open PIT filtered out +area: Search +type: bug +issues: + - 102596 diff --git a/qa/ccs-common-rest/build.gradle b/qa/ccs-common-rest/build.gradle index 8ad306144bd98..41dba06649ea1 100--- a/qa/ccs-common-rest/build.gradle +++ b/qa/ccs-common-rest/build.gradle @@ -40,8 +40,7 @@ tasks.named("yamlRestTest") { 'search.aggregation/220_filters_bucket/cache hits', // node_selector? 
'search.aggregation/50_filter/Standard queries get cached', 'search.aggregation/50_filter/Terms lookup gets cached', // terms lookup by "index" doesn't seem to work correctly - 'search.aggregation/70_adjacency_matrix/Terms lookup', // terms lookup by "index" doesn't seem to work correctly - 'search/350_point_in_time/point-in-time with index filter' + 'search.aggregation/70_adjacency_matrix/Terms lookup' // terms lookup by "index" doesn't seem to work correctly ].join(',') } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml index 7e78450931df5..71ea09d33e81d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search/350_point_in_time.yml @@ -23,6 +23,10 @@ setup: - do: indices.create: index: test2 + body: + settings: + index: + number_of_shards: 2 - do: index: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index eff681f1f281b..4bd97f772e4c3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -21,6 +22,7 @@ import org.elasticsearch.search.query.ThrowingQueryBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.transport.RemoteClusterAware; +import org.hamcrest.MatcherAssert; import java.util.ArrayList; import java.util.Collection; @@ -74,11 +76,15 @@ public void testBasic() { final Client localClient = client(LOCAL_CLUSTER); final Client remoteClient = client(REMOTE_CLUSTER); int localNumDocs = randomIntBetween(10, 50); - assertAcked(localClient.admin().indices().prepareCreate("local_test")); + assertAcked( + localClient.admin().indices().prepareCreate("local_test").setSettings(Settings.builder().put("index.number_of_shards", 3)) + ); indexDocs(localClient, "local_test", localNumDocs); int remoteNumDocs = randomIntBetween(10, 50); - assertAcked(remoteClient.admin().indices().prepareCreate("remote_test")); + assertAcked( + remoteClient.admin().indices().prepareCreate("remote_test").setSettings(Settings.builder().put("index.number_of_shards", 3)) + ); indexDocs(remoteClient, "remote_test", remoteNumDocs); boolean includeLocalIndex = randomBoolean(); List indices = new ArrayList<>(); @@ -107,19 +113,120 @@ public void testBasic() { SearchResponse.Clusters clusters = resp.getClusters(); int expectedNumClusters = 1 + (includeLocalIndex ? 
1 : 0); - assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), equalTo(expectedNumClusters)); - assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + MatcherAssert.assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + MatcherAssert.assertThat( + clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), + equalTo(expectedNumClusters) + ); + MatcherAssert.assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); if (includeLocalIndex) { SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localCluster); - assertOneSuccessfulShard(localCluster); + assertAllSuccessfulShards(localCluster, 3, 0); } SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); assertNotNull(remoteCluster); - assertOneSuccessfulShard(remoteCluster); + assertAllSuccessfulShards(remoteCluster, 3, 0); + } + ); + } finally { + closePointInTime(pitId); + } + } + + public void testOpenPITWithIndexFilter() { + final Client localClient = client(LOCAL_CLUSTER); + final Client remoteClient = client(REMOTE_CLUSTER); + + assertAcked( + localClient.admin().indices().prepareCreate("local_test").setSettings(Settings.builder().put("index.number_of_shards", 3)) + ); + localClient.prepareIndex("local_test").setId("1").setSource("value", "1", "@timestamp", "2024-03-01").get(); + localClient.prepareIndex("local_test").setId("2").setSource("value", "2", "@timestamp", "2023-12-01").get(); + localClient.admin().indices().prepareRefresh("local_test").get(); + + assertAcked( + remoteClient.admin().indices().prepareCreate("remote_test").setSettings(Settings.builder().put("index.number_of_shards", 3)) + ); + remoteClient.prepareIndex("remote_test").setId("1").setSource("value", "1", "@timestamp", "2024-01-01").get(); + remoteClient.prepareIndex("remote_test").setId("2").setSource("value", "2", "@timestamp", "2023-12-01").get(); + remoteClient.admin().indices().prepareRefresh("remote_test").get(); + + List indices = new ArrayList<>(); + indices.add(randomFrom("*", "local_*", "local_test")); + indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test")); + + OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices.toArray(new String[0])); + request.keepAlive(TimeValue.timeValueMinutes(2)); + request.indexFilter(new RangeQueryBuilder("@timestamp").gte("2023-12-15")); + final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); + String pitId = response.getPointInTimeId(); + + if (randomBoolean()) { + localClient.prepareIndex("local_test").setId("local_new").setSource().get(); + localClient.admin().indices().prepareRefresh().get(); + } + if (randomBoolean()) { + remoteClient.prepareIndex("remote_test").setId("remote_new").setSource().get(); + remoteClient.admin().indices().prepareRefresh().get(); + } + + try { + assertNoFailuresAndResponse( + localClient.prepareSearch() + .setPreference(null) + .setQuery(new MatchAllQueryBuilder()) + .setPointInTime(new PointInTimeBuilder(pitId)), + resp -> { + assertHitCount(resp, 2); + + SearchResponse.Clusters clusters = resp.getClusters(); + int expectedNumClusters = 2; + MatcherAssert.assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + MatcherAssert.assertThat( + 
clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), + equalTo(expectedNumClusters) + ); + MatcherAssert.assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + + // both indices (local and remote) have shards, but there is a single shard left after can match + SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localCluster); + assertAllSuccessfulShards(localCluster, 1, 0); + SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteCluster); + assertAllSuccessfulShards(remoteCluster, 1, 0); + } + ); + + assertNoFailuresAndResponse( + localClient.prepareSearch() + .setPreference(null) + // test the scenario where search also runs can match and filters additional shards out + .setPreFilterShardSize(1) + .setQuery(new RangeQueryBuilder("@timestamp").gte("2024-02-01")) + .setPointInTime(new PointInTimeBuilder(pitId)), + resp -> { + assertHitCount(resp, 1); + + SearchResponse.Clusters clusters = resp.getClusters(); + int expectedNumClusters = 2; + MatcherAssert.assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + MatcherAssert.assertThat( + clusters.getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL), + equalTo(expectedNumClusters) + ); + MatcherAssert.assertThat(clusters.getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED), equalTo(0)); + + // both indices (local and remote) have shards, but there is a single shard left after can match + SearchResponse.Cluster localCluster = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localCluster); + assertAllSuccessfulShards(localCluster, 1, 0); + SearchResponse.Cluster remoteCluster = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteCluster); + assertAllSuccessfulShards(remoteCluster, 1, 1); } ); } finally { @@ -180,16 +287,6 @@ public void testFailuresOnOneShardsWithPointInTime() throws ExecutionException, } } - private static void assertOneSuccessfulShard(SearchResponse.Cluster cluster) { - assertThat(cluster.getTotalShards(), equalTo(1)); - assertThat(cluster.getSuccessfulShards(), equalTo(1)); - assertThat(cluster.getFailedShards(), equalTo(0)); - assertThat(cluster.getFailures().size(), equalTo(0)); - assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); - assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L)); - assertFalse(cluster.isTimedOut()); - } - private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) { assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1)); assertThat(cluster.getFailedShards(), equalTo(1)); @@ -200,6 +297,17 @@ private static void assertOneFailedShard(SearchResponse.Cluster cluster, int tot assertFalse(cluster.isTimedOut()); } + private static void assertAllSuccessfulShards(SearchResponse.Cluster cluster, int numShards, int skippedShards) { + assertThat(cluster.getTotalShards(), equalTo(numShards)); + assertThat(cluster.getSkippedShards(), equalTo(skippedShards)); + assertThat(cluster.getSuccessfulShards(), equalTo(numShards)); + assertThat(cluster.getFailedShards(), equalTo(0)); + assertThat(cluster.getFailures().size(), equalTo(0)); + assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertFalse(cluster.isTimedOut()); + } + private String openPointInTime(String[] indices, TimeValue keepAlive) { 
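// test helper: opens a point in time over the given indices and returns its id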
OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(TransportOpenPointInTimeAction.TYPE, request).actionGet(); diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 6989ae159c9e2..ca3bca7c3969f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -135,6 +135,9 @@ public SearchPhase newSearchPhase( ThreadPool threadPool, SearchResponse.Clusters clusters ) { + // Note: remote shards are prefiltered via can match as part of search shards. They don't need additional pre-filtering and + // that is signaled to the local can match through the SearchShardIterator#prefiltered flag. Local shards do need to go + // through the local can match phase. if (SearchService.canRewriteToMatchNone(searchRequest.source())) { return new CanMatchPreFilterSearchPhase( logger, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 06e9b8fa51319..727e576764102 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -981,8 +981,12 @@ static List getRemoteShardsIteratorFromPointInTime( for (Map.Entry entry : searchShardsResponses.entrySet()) { for (SearchShardsGroup group : entry.getValue().getGroups()) { final ShardId shardId = group.shardId(); - final String clusterAlias = entry.getKey(); final SearchContextIdForNode perNode = searchContextId.shards().get(shardId); + if (perNode == null) { + // the shard was skipped after can match, hence it is not even part of the pit id + continue; + } + final String clusterAlias = entry.getKey(); assert clusterAlias.equals(perNode.getClusterAlias()) : clusterAlias + " != " + perNode.getClusterAlias(); final List targetNodes = new ArrayList<>(group.allocatedNodes().size()); targetNodes.add(perNode.getNode()); @@ -1011,9 +1015,26 @@ static List getRemoteShardsIteratorFromPointInTime( remoteShardIterators.add(shardIterator); } } + assert checkAllRemotePITShardsWereReturnedBySearchShards(searchContextId.shards(), searchShardsResponses) + : "search shards did not return remote shards that PIT included: " + searchContextId.shards(); return remoteShardIterators; } + private static boolean checkAllRemotePITShardsWereReturnedBySearchShards( + Map searchContextIdShards, + Map searchShardsResponses + ) { + Map searchContextIdForNodeMap = new HashMap<>(searchContextIdShards); + for (SearchShardsResponse searchShardsResponse : searchShardsResponses.values()) { + for (SearchShardsGroup group : searchShardsResponse.getGroups()) { + searchContextIdForNodeMap.remove(group.shardId()); + } + } + return searchContextIdForNodeMap.values() + .stream() + .allMatch(searchContextIdForNode -> searchContextIdForNode.getClusterAlias() == null); + } + Index[] resolveLocalIndices(OriginalIndices localIndices, ClusterState clusterState, SearchTimeProvider timeProvider) { if (localIndices == null) { return Index.EMPTY_ARRAY; // don't search on any local index (happens when only remote indices were specified) From e6737ba5877b54ce9fd474f06c530680af458cc4 Mon Sep 17 00:00:00 2001 
From: Matt Culbreth Date: Mon, 15 Jan 2024 09:59:07 -0500 Subject: [PATCH 30/95] Add .status to end of HealthPeriodicLogger metric names (#104367) --- .../java/org/elasticsearch/health/HealthPeriodicLogger.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java index 288837fb3c808..a113bad942cc8 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java +++ b/server/src/main/java/org/elasticsearch/health/HealthPeriodicLogger.java @@ -374,7 +374,7 @@ void writeMetrics(List healthIndicatorResults) { if (metric == null) { metric = LongGaugeMetric.create( this.meterRegistry, - String.format(Locale.ROOT, "es.health.%s.red", metricName), + String.format(Locale.ROOT, "es.health.%s.red.status", metricName), String.format(Locale.ROOT, "%s: Red", metricName), "{cluster}" ); From 6a4a22f24e0f876ff8828ea5fe75f6415a79bb7d Mon Sep 17 00:00:00 2001 From: Max Hniebergall <137079448+maxhniebergall@users.noreply.github.com> Date: Mon, 15 Jan 2024 10:06:26 -0500 Subject: [PATCH 31/95] [ML] Automatically download the ELSER model when PUT in _inference (#104334) * Automatically download ELSER when PUT in _inference * Revert "Disable elser download test case in inf IT (#104271)" * add IT * disable IT --- docs/changelog/104334.yaml | 5 ++++ .../inference/InferenceService.java | 11 +++++++ .../inference/InferenceBaseRestTest.java | 16 ---------- .../xpack/inference/InferenceCrudIT.java | 24 --------------- .../TransportPutInferenceModelAction.java | 20 +++++++++---- .../services/elser/ElserMlNodeService.java | 30 +++++++++++++++++++ 6 files changed, 61 insertions(+), 45 deletions(-) create mode 100644 docs/changelog/104334.yaml diff --git a/docs/changelog/104334.yaml b/docs/changelog/104334.yaml new file mode 100644 index 0000000000000..ff242ee15141b --- /dev/null +++ b/docs/changelog/104334.yaml @@ -0,0 +1,5 @@ +pr: 104334 +summary: Automatically download the ELSER model when PUT in `_inference` +area: Machine Learning +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index 5eb3fc2ed6174..235de51d22572 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -97,6 +97,17 @@ default void stop(String modelId, ActionListener listener) { listener.onResponse(true); } + /** + * Put the model definition (if applicable) + * The main purpose of this function is to download ELSER + * The default action does nothing except acknowledge the request (true). + * @param modelVariant The configuration of the model variant to be downloaded + * @param listener The listener + */ + default void putModel(Model modelVariant, ActionListener listener) { + listener.onResponse(true); + } + /** * Optionally test the new model configuration in the inference service. 
* This function should be called when the model is first created, the diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java index c8af3fc64521f..1578e03608e82 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceBaseRestTest.java @@ -73,14 +73,6 @@ protected Map putModel(String modelId, String modelConfig, TaskT return entityAsMap(response); } - protected Map deleteModel(String modelId, TaskType taskType) throws IOException { - var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); - var request = new Request("DELETE", endpoint); - var response = client().performRequest(request); - assertOkOrCreated(response); - return entityAsMap(response); - } - protected Map getModels(String modelId, TaskType taskType) throws IOException { var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); var request = new Request("GET", endpoint); @@ -97,14 +89,6 @@ protected Map getAllModels() throws IOException { return entityAsMap(response); } - protected Map getTrainedModel(String modelId) throws IOException { - var endpoint = Strings.format("_ml/trained_models/%s/_stats", modelId); - var request = new Request("GET", endpoint); - var response = client().performRequest(request); - assertOkOrCreated(response); - return entityAsMap(response); - } - protected Map inferOnMockService(String modelId, TaskType taskType, List input) throws IOException { var endpoint = Strings.format("_inference/%s/%s", taskType, modelId); var request = new Request("POST", endpoint); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index ee99745d40090..84b6bb94503c3 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -21,30 +21,6 @@ public class InferenceCrudIT extends InferenceBaseRestTest { - public void testElserCrud() throws IOException { - - String elserConfig = """ - { - "service": "elser", - "service_settings": { - "num_allocations": 1, - "num_threads": 1 - }, - "task_settings": {} - } - """; - - // ELSER not downloaded case - { - String modelId = randomAlphaOfLength(10).toLowerCase(); - expectThrows(ResponseException.class, () -> putModel(modelId, elserConfig, TaskType.SPARSE_EMBEDDING)); - } - - // Happy Case - // We choose not to test the case where ELSER is downloaded to avoid causing excessive network traffic. 
- // This test case will be tested separately outside of CI - } - @SuppressWarnings("unchecked") public void testGet() throws IOException { for (int i = 0; i < 5; i++) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java index 142e071c9a133..8bcc07a6322bc 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportPutInferenceModelAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -186,11 +187,20 @@ private void parseAndStoreModel( ); } - private static void startModel(InferenceService service, Model model, ActionListener listener) { - service.start( - model, - listener.delegateFailureAndWrap((l, ok) -> l.onResponse(new PutInferenceModelAction.Response(model.getConfigurations()))) - ); + private static void startModel(InferenceService service, Model model, ActionListener finalListener) { + SubscribableListener.newForked((listener1) -> { service.putModel(model, listener1); }).< + PutInferenceModelAction.Response>andThen((listener2, modelDidPut) -> { + if (modelDidPut) { + service.start( + model, + listener2.delegateFailureAndWrap( + (l3, ok) -> l3.onResponse(new PutInferenceModelAction.Response(model.getConfigurations())) + ) + ); + } else { + logger.warn("Failed to put model [{}]", model.getModelId()); + } + }).addListener(finalListener); } private Map requestToMap(PutInferenceModelAction.Request request) throws IOException { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java index 6f1da745b3c23..4755c11ece9fe 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java @@ -27,8 +27,11 @@ import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; import org.elasticsearch.xpack.core.ml.action.CreateTrainedModelAssignmentAction; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; +import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TextExpansionConfigUpdate; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -37,6 +40,8 @@ import java.util.Map; import java.util.Set; +import static org.elasticsearch.xpack.core.ClientHelper.INFERENCE_ORIGIN; +import static 
org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus.State.STARTED; import static org.elasticsearch.xpack.inference.services.ServiceUtils.removeFromMapOrThrowIfNull; import static org.elasticsearch.xpack.inference.services.ServiceUtils.throwIfNotEmptyMap; @@ -229,6 +234,31 @@ public void infer(Model model, List input, Map taskSetti ); } + @Override + public void putModel(Model model, ActionListener listener) { + if (model instanceof ElserMlNodeModel == false) { + listener.onFailure( + new IllegalStateException("Error starting model, [" + model.getConfigurations().getModelId() + "] is not an elser model") + ); + return; + } else { + String modelVariant = ((ElserMlNodeModel) model).getServiceSettings().getModelVariant(); + var fieldNames = List.of(); + var input = new TrainedModelInput(fieldNames); + var config = TrainedModelConfig.builder().setInput(input).setModelId(modelVariant).build(); + PutTrainedModelAction.Request putRequest = new PutTrainedModelAction.Request(config, false, true); + executeAsyncWithOrigin( + client, + INFERENCE_ORIGIN, + PutTrainedModelAction.INSTANCE, + putRequest, + listener.delegateFailure((l, r) -> { + l.onResponse(Boolean.TRUE); + }) + ); + } + } + private static ElserMlNodeTaskSettings taskSettingsFromMap(TaskType taskType, Map config) { if (taskType != TaskType.SPARSE_EMBEDDING) { throw new ElasticsearchStatusException(TaskType.unsupportedTaskTypeErrorMsg(taskType, NAME), RestStatus.BAD_REQUEST); From be9034bb425f8ade409b88b5d325befb19f48569 Mon Sep 17 00:00:00 2001 From: Carlos Delgado <6339205+carlosdelest@users.noreply.github.com> Date: Mon, 15 Jan 2024 16:44:04 +0100 Subject: [PATCH 32/95] Fix ShardSplittingQueryTests by not using MockRandomMergePolicy for IndexWriter (#104358) --- .../index/shard/ShardSplittingQueryTests.java | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java index 0895d680046c9..86408b3b22ed7 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRouting; @@ -46,7 +47,7 @@ public void testSplitOnID() throws IOException { SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); Directory dir = newFSDirectory(createTempDir()); final int numDocs = randomIntBetween(50, 100); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + RandomIndexWriter writer = createIndexWriter(dir); int numShards = randomIntBetween(2, 10); IndexMetadata metadata = IndexMetadata.builder("test") .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())) @@ -68,12 +69,11 @@ public void testSplitOnID() throws IOException { dir.close(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104349") public void testSplitOnRouting() throws IOException { SeqNoFieldMapper.SequenceIDFields 
sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); Directory dir = newFSDirectory(createTempDir()); final int numDocs = randomIntBetween(50, 100); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + RandomIndexWriter writer = createIndexWriter(dir); int numShards = randomIntBetween(2, 10); IndexMetadata metadata = IndexMetadata.builder("test") .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
@@ -98,7 +98,7 @@ public void testSplitOnIdOrRouting() throws IOException { SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); Directory dir = newFSDirectory(createTempDir()); final int numDocs = randomIntBetween(50, 100); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + RandomIndexWriter writer = createIndexWriter(dir); int numShards = randomIntBetween(2, 10); IndexMetadata metadata = IndexMetadata.builder("test") .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
@@ -125,7 +125,7 @@ public void testSplitOnRoutingPartitioned() throws IOException { SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); Directory dir = newFSDirectory(createTempDir()); final int numDocs = randomIntBetween(50, 100); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + RandomIndexWriter writer = createIndexWriter(dir); int numShards = randomIntBetween(2, 10); IndexMetadata metadata = IndexMetadata.builder("test") .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
@@ -237,4 +237,12 @@ private Iterable topLevel(IndexRouting indexRouting, int id, @Nu private int shardId(IndexRouting indexRouting, int id, @Nullable String routing) { return indexRouting.getShard(Integer.toString(id), routing); } + + private static RandomIndexWriter createIndexWriter(Directory dir) throws IOException { + return new RandomIndexWriter( + random(), + dir, + LuceneTestCase.newIndexWriterConfig().setMergePolicy(LuceneTestCase.newMergePolicy(random(), false)) + ); + } }
From 0a212103178a584711019187b1c72072e48b1857 Mon Sep 17 00:00:00 2001 From: Slobodan Adamović Date: Mon, 15 Jan 2024 17:11:57 +0100 Subject: [PATCH 33/95] Expose realms authentication metrics (#104200)
This PR adds metrics for recording successful and failed authentications for individual realms. Exposed metrics are: - `es.security.authc.realms.success.total` - `es.security.authc.realms.failures.total` - `es.security.authc.realms.time` Each metric is exposed at the node level and includes additional information via these attributes: - `es.security.realm_type` - can be one of: `jwt`, `saml`, `oidc`, `active_directory`, `ldap`, `pki`, `kerberos`... - `es.security.realm_name` - `es.security.realm_authc_failure_reason` A minimal sketch of how these attributes are assembled is included below.
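For illustration, this sketch mirrors the buildMetricAttributes helper that this patch adds to RealmsAuthenticator; only the attribute keys are inlined here as string literals (the patch itself uses the ATTRIBUTE_* constants), everything else matches the patch:

    private Map<String, Object> buildMetricAttributes(Realm realm, String failureReason) {
        // 2 entries for realm name + type; 1 more when a failure reason is attached
        final Map<String, Object> attributes = new HashMap<>(failureReason != null ? 3 : 2);
        attributes.put("es.security.realm_name", realm.name());
        attributes.put("es.security.realm_type", realm.type());
        if (failureReason != null) {
            attributes.put("es.security.realm_authc_failure_reason", failureReason);
        }
        return attributes;
    }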
--- docs/changelog/104200.yaml | 5 + .../security/authc/AuthenticationService.java | 2 +- .../security/authc/RealmsAuthenticator.java | 149 +++++++++++------- .../security/metric/SecurityMetricType.java | 7 + .../authc/AuthenticationServiceTests.java | 10 +- .../authc/RealmsAuthenticatorTests.java | 120 +++++++++++++- .../authc/jwt/JwtTokenExtractionTests.java | 7 +- 7 files changed, 238 insertions(+), 62 deletions(-) create mode 100644 docs/changelog/104200.yaml
diff --git a/docs/changelog/104200.yaml b/docs/changelog/104200.yaml new file mode 100644 index 0000000000000..bc2aa2507f0ec --- /dev/null +++ b/docs/changelog/104200.yaml
@@ -0,0 +1,5 @@ +pr: 104200 +summary: Expose realms authentication metrics +area: Authentication +type: enhancement +issues: []
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java index 06883d9423387..e806e11d3f16d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
@@ -114,7 +114,7 @@ public AuthenticationService( new ServiceAccountAuthenticator(serviceAccountService, nodeName, meterRegistry), new OAuth2TokenAuthenticator(tokenService, meterRegistry), new ApiKeyAuthenticator(apiKeyService, nodeName, meterRegistry), - new RealmsAuthenticator(numInvalidation, lastSuccessfulAuthCache) + new RealmsAuthenticator(numInvalidation, lastSuccessfulAuthCache, meterRegistry) ); }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticator.java index 25054982655c4..51af3a7eda665 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticator.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticator.java
@@ -17,6 +17,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xpack.core.common.IteratingActionListener; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
@@ -25,9 +26,13 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.RealmUserLookup; +import org.elasticsearch.xpack.security.metric.InstrumentedSecurityActionListener; +import org.elasticsearch.xpack.security.metric.SecurityMetricType; +import org.elasticsearch.xpack.security.metric.SecurityMetrics; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map;
@@ -35,19 +40,40 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.LongSupplier; import static org.elasticsearch.core.Strings.format; public class RealmsAuthenticator implements Authenticator { + public static final String
ATTRIBUTE_REALM_NAME = "es.security.realm_name"; + public static final String ATTRIBUTE_REALM_TYPE = "es.security.realm_type"; + public static final String ATTRIBUTE_REALM_AUTHC_FAILURE_REASON = "es.security.realm_authc_failure_reason"; + private static final Logger logger = LogManager.getLogger(RealmsAuthenticator.class); private final AtomicLong numInvalidation; private final Cache lastSuccessfulAuthCache; + private final SecurityMetrics authenticationMetrics; + + public RealmsAuthenticator(AtomicLong numInvalidation, Cache lastSuccessfulAuthCache, MeterRegistry meterRegistry) { + this(numInvalidation, lastSuccessfulAuthCache, meterRegistry, System::nanoTime); + } - public RealmsAuthenticator(AtomicLong numInvalidation, Cache lastSuccessfulAuthCache) { + RealmsAuthenticator( + AtomicLong numInvalidation, + Cache lastSuccessfulAuthCache, + MeterRegistry meterRegistry, + LongSupplier nanoTimeSupplier + ) { this.numInvalidation = numInvalidation; this.lastSuccessfulAuthCache = lastSuccessfulAuthCache; + this.authenticationMetrics = new SecurityMetrics<>( + SecurityMetricType.AUTHC_REALMS, + meterRegistry, + this::buildMetricAttributes, + nanoTimeSupplier + ); } @Override @@ -141,66 +167,69 @@ private void consumeToken(Context context, ActionListener { - assert result != null : "Realm " + realm + " produced a null authentication result"; - logger.debug( - "Authentication of [{}] using realm [{}] with token [{}] was [{}]", - authenticationToken.principal(), - realm, - authenticationToken.getClass().getSimpleName(), - result - ); - if (result.getStatus() == AuthenticationResult.Status.SUCCESS) { - // user was authenticated, populate the authenticated by information - authenticatedByRef.set(realm); - authenticationResultRef.set(result); - if (lastSuccessfulAuthCache != null && startInvalidation == numInvalidation.get()) { - lastSuccessfulAuthCache.put(authenticationToken.principal(), realm); - } - userListener.onResponse(result.getValue()); - } else { - // the user was not authenticated, call this so we can audit the correct event - context.getRequest().realmAuthenticationFailed(authenticationToken, realm.name()); - if (result.getStatus() == AuthenticationResult.Status.TERMINATE) { - final var resultException = result.getException(); - if (resultException != null) { - logger.info( - () -> format( - "Authentication of [%s] was terminated by realm [%s] - %s", + realm.authenticate( + authenticationToken, + InstrumentedSecurityActionListener.wrapForAuthc(authenticationMetrics, realm, ActionListener.wrap(result -> { + assert result != null : "Realm " + realm + " produced a null authentication result"; + logger.debug( + "Authentication of [{}] using realm [{}] with token [{}] was [{}]", + authenticationToken.principal(), + realm, + authenticationToken.getClass().getSimpleName(), + result + ); + if (result.getStatus() == AuthenticationResult.Status.SUCCESS) { + // user was authenticated, populate the authenticated by information + authenticatedByRef.set(realm); + authenticationResultRef.set(result); + if (lastSuccessfulAuthCache != null && startInvalidation == numInvalidation.get()) { + lastSuccessfulAuthCache.put(authenticationToken.principal(), realm); + } + userListener.onResponse(result.getValue()); + } else { + // the user was not authenticated, call this so we can audit the correct event + context.getRequest().realmAuthenticationFailed(authenticationToken, realm.name()); + if (result.getStatus() == AuthenticationResult.Status.TERMINATE) { + final var resultException = result.getException(); + if 
(resultException != null) { + logger.info( + () -> format( + "Authentication of [%s] was terminated by realm [%s] - %s", + authenticationToken.principal(), + realm.name(), + result.getMessage() + ), + resultException + ); + userListener.onFailure(resultException); + } else { + logger.info( + "Authentication of [{}] was terminated by realm [{}] - {}", authenticationToken.principal(), realm.name(), result.getMessage() - ), - resultException - ); - userListener.onFailure(resultException); + ); + userListener.onFailure(AuthenticationTerminatedSuccessfullyException.INSTANCE); + } } else { - logger.info( - "Authentication of [{}] was terminated by realm [{}] - {}", - authenticationToken.principal(), - realm.name(), - result.getMessage() - ); - userListener.onFailure(AuthenticationTerminatedSuccessfullyException.INSTANCE); - } - } else { - if (result.getMessage() != null) { - messages.put(realm, new Tuple<>(result.getMessage(), result.getException())); + if (result.getMessage() != null) { + messages.put(realm, new Tuple<>(result.getMessage(), result.getException())); + } + userListener.onResponse(null); } - userListener.onResponse(null); } - } - }, (ex) -> { - logger.warn( - () -> format( - "An error occurred while attempting to authenticate [%s] against realm [%s]", - authenticationToken.principal(), - realm.name() - ), - ex - ); - userListener.onFailure(ex); - })); + }, (ex) -> { + logger.warn( + () -> format( + "An error occurred while attempting to authenticate [%s] against realm [%s]", + authenticationToken.principal(), + realm.name() + ), + ex + ); + userListener.onFailure(ex); + })) + ); } else { userListener.onResponse(null); } @@ -362,4 +391,14 @@ public synchronized Throwable fillInStackTrace() { return this; } } + + private Map buildMetricAttributes(Realm realm, String failureReason) { + final Map attributes = new HashMap<>(failureReason != null ? 
3 : 2); + attributes.put(ATTRIBUTE_REALM_NAME, realm.name()); + attributes.put(ATTRIBUTE_REALM_TYPE, realm.type()); + if (failureReason != null) { + attributes.put(ATTRIBUTE_REALM_AUTHC_FAILURE_REASON, failureReason); + } + return attributes; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java index a77207c8e5677..02ac292aee781 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/metric/SecurityMetricType.java @@ -49,6 +49,13 @@ public enum SecurityMetricType { ) ), + AUTHC_REALMS( + SecurityMetricGroup.AUTHC, + new SecurityMetricInfo("es.security.authc.realms.success.total", "Number of successful realm authentications.", "count"), + new SecurityMetricInfo("es.security.authc.realms.failures.total", "Number of failed realm authentications.", "count"), + new SecurityMetricInfo("es.security.authc.realms.time", "Time it took (in nanoseconds) to execute realm authentication.", "ns") + ), + ; private final SecurityMetricGroup group; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index c524847e9dbbb..e9a252553fe8d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -575,7 +575,8 @@ public void testAuthenticateSmartRealmOrdering() { }, this::logAndFail)); verify(auditTrail).authenticationFailed(reqId.get(), firstRealm.name(), token, "_action", transportRequest); - verify(firstRealm, times(2)).name(); // used above one time + verify(firstRealm, times(4)).name(); // used above one time plus two times for authc result and time metrics + verify(firstRealm, times(2)).type(); // used two times to collect authc result and time metrics verify(secondRealm, times(2)).realmRef(); // also used in license tracking verify(firstRealm, times(2)).token(threadContext); verify(secondRealm, times(2)).token(threadContext); @@ -583,6 +584,8 @@ public void testAuthenticateSmartRealmOrdering() { verify(secondRealm, times(2)).supports(token); verify(firstRealm).authenticate(eq(token), anyActionListener()); verify(secondRealm, times(2)).authenticate(eq(token), anyActionListener()); + verify(secondRealm, times(4)).name(); // called two times for every authenticate call to collect authc result and time metrics + verify(secondRealm, times(4)).type(); // called two times for every authenticate call to collect authc result and time metrics verifyNoMoreInteractions(auditTrail, firstRealm, secondRealm); // Now assume some change in the backend system so that 2nd realm no longer has the user, but the 1st realm does. 
@@ -711,7 +714,8 @@ public void testAuthenticateSmartRealmOrderingDisabled() { verify(operatorPrivilegesService).maybeMarkOperatorUser(eq(result), eq(threadContext)); }, this::logAndFail)); verify(auditTrail, times(2)).authenticationFailed(reqId.get(), firstRealm.name(), token, "_action", transportRequest); - verify(firstRealm, times(3)).name(); // used above one time + verify(firstRealm, times(7)).name(); // used above one time plus two times for every call to collect success and time metrics + verify(firstRealm, times(4)).type(); // used two times for every call to collect authc result and time metrics verify(secondRealm, times(2)).realmRef(); verify(firstRealm, times(2)).token(threadContext); verify(secondRealm, times(2)).token(threadContext); @@ -719,6 +723,8 @@ public void testAuthenticateSmartRealmOrderingDisabled() { verify(secondRealm, times(2)).supports(token); verify(firstRealm, times(2)).authenticate(eq(token), anyActionListener()); verify(secondRealm, times(2)).authenticate(eq(token), anyActionListener()); + verify(secondRealm, times(4)).name(); // called two times for every authenticate call to collect authc result and time metrics + verify(secondRealm, times(4)).type(); // called two times for every authenticate call to collect authc result and time metrics verifyNoMoreInteractions(auditTrail, firstRealm, secondRealm); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java index 0309404fee84d..b62fc4ab6b04d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsAuthenticatorTests.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; @@ -28,24 +28,28 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmDomain; import org.elasticsearch.xpack.core.security.user.User; +import org.elasticsearch.xpack.security.metric.SecurityMetricType; import org.junit.Before; import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class RealmsAuthenticatorTests extends ESTestCase { +public class RealmsAuthenticatorTests extends AbstractAuthenticatorTests { private ThreadContext threadContext; private Realms realms; @@ -63,6 +67,8 @@ public class RealmsAuthenticatorTests extends ESTestCase { private Cache 
lastSuccessfulAuthCache; private String nodeName; private RealmsAuthenticator realmsAuthenticator; + private TestTelemetryPlugin telemetryPlugin; + private TestNanoTimeSupplier nanoTimeSupplier; @SuppressWarnings("unchecked") @Before @@ -101,7 +107,14 @@ public void init() throws Exception { numInvalidation = new AtomicLong(); lastSuccessfulAuthCache = mock(Cache.class); - realmsAuthenticator = new RealmsAuthenticator(numInvalidation, lastSuccessfulAuthCache); + telemetryPlugin = new TestTelemetryPlugin(); + nanoTimeSupplier = new TestNanoTimeSupplier(randomLongBetween(0, 100)); + realmsAuthenticator = new RealmsAuthenticator( + numInvalidation, + lastSuccessfulAuthCache, + telemetryPlugin.getTelemetryProvider(Settings.EMPTY).getMeterRegistry(), + nanoTimeSupplier + ); } public void testExtractCredentials() { @@ -258,6 +271,107 @@ public void testEmptyRunAsUsernameWillFail() { assertThat(expectThrows(ElasticsearchSecurityException.class, future::actionGet), is(e)); } + public void testRecodingSuccessfulAuthenticationMetrics() { + when(lastSuccessfulAuthCache.get(username)).thenReturn(randomFrom(realm1, realm2, null)); + final Realm successfulRealm = randomFrom(realm1, realm2); + when(successfulRealm.supports(authenticationToken)).thenReturn(true); + final long successfulExecutionTimeInNanos = randomLongBetween(0, 500); + doAnswer(invocationOnMock -> { + nanoTimeSupplier.advanceTime(successfulExecutionTimeInNanos); + @SuppressWarnings("unchecked") + final ActionListener> listener = (ActionListener>) invocationOnMock + .getArguments()[1]; + listener.onResponse(AuthenticationResult.success(user)); + return null; + }).when(successfulRealm).authenticate(eq(authenticationToken), any()); + + final Authenticator.Context context = createAuthenticatorContext(); + context.addAuthenticationToken(authenticationToken); + + final PlainActionFuture> future = new PlainActionFuture<>(); + realmsAuthenticator.authenticate(context, future); + final AuthenticationResult result = future.actionGet(); + assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + + assertSingleSuccessAuthMetric( + telemetryPlugin, + SecurityMetricType.AUTHC_REALMS, + Map.ofEntries( + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_NAME, successfulRealm.name()), + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_TYPE, successfulRealm.type()) + ) + ); + + assertZeroFailedAuthMetrics(telemetryPlugin, SecurityMetricType.AUTHC_REALMS); + + assertAuthenticationTimeMetric( + telemetryPlugin, + SecurityMetricType.AUTHC_REALMS, + successfulExecutionTimeInNanos, + Map.ofEntries( + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_NAME, successfulRealm.name()), + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_TYPE, successfulRealm.type()) + ) + ); + } + + public void testRecordingFailedAuthenticationMetric() { + when(lastSuccessfulAuthCache.get(username)).thenReturn(randomFrom(realm1, realm2, null)); + + final Realm unsuccessfulRealm; + if (randomBoolean()) { + when(realm1.supports(authenticationToken)).thenReturn(false); + unsuccessfulRealm = realm2; + } else { + when(realm2.supports(authenticationToken)).thenReturn(false); + unsuccessfulRealm = realm1; + } + + when(unsuccessfulRealm.supports(authenticationToken)).thenReturn(true); + final long unsuccessfulExecutionTimeInNanos = randomLongBetween(0, 500); + doAnswer(invocationOnMock -> { + nanoTimeSupplier.advanceTime(unsuccessfulExecutionTimeInNanos); + @SuppressWarnings("unchecked") + final ActionListener> listener = (ActionListener>) invocationOnMock + .getArguments()[1]; + 
listener.onResponse(AuthenticationResult.unsuccessful("unsuccessful realms authentication", null)); + return null; + }).when(unsuccessfulRealm).authenticate(eq(authenticationToken), any()); + + final Authenticator.Context context = createAuthenticatorContext(); + final ElasticsearchSecurityException exception = new ElasticsearchSecurityException("realms authentication failed"); + when(request.authenticationFailed(same(authenticationToken))).thenReturn(exception); + context.addAuthenticationToken(authenticationToken); + + final PlainActionFuture> future = new PlainActionFuture<>(); + realmsAuthenticator.authenticate(context, future); + var e = expectThrows(ElasticsearchSecurityException.class, () -> future.actionGet()); + assertThat(e, sameInstance(exception)); + + assertSingleFailedAuthMetric( + telemetryPlugin, + SecurityMetricType.AUTHC_REALMS, + Map.ofEntries( + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_NAME, unsuccessfulRealm.name()), + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_TYPE, unsuccessfulRealm.type()), + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_AUTHC_FAILURE_REASON, "unsuccessful realms authentication") + ) + ); + + assertZeroSuccessAuthMetrics(telemetryPlugin, SecurityMetricType.AUTHC_REALMS); + + assertAuthenticationTimeMetric( + telemetryPlugin, + SecurityMetricType.AUTHC_REALMS, + unsuccessfulExecutionTimeInNanos, + Map.ofEntries( + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_NAME, unsuccessfulRealm.name()), + Map.entry(RealmsAuthenticator.ATTRIBUTE_REALM_TYPE, unsuccessfulRealm.type()) + ) + ); + + } + private void configureRealmAuthResponse(Realm realm, AuthenticationResult authenticationResult) { doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked")
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java index 8662561aca1ae..9d2e8228aedde 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtTokenExtractionTests.java
@@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.authc.AuthenticationToken; import org.elasticsearch.xpack.core.security.authc.Realm;
@@ -54,7 +55,11 @@ public void testRealmLetsThroughInvalidJWTs() { Realms realms = mock(Realms.class); // mock realm sits in-between when(realms.getActiveRealms()).thenReturn(List.of(jwtRealm1, mockRealm, jwtRealm2)); - RealmsAuthenticator realmsAuthenticator = new RealmsAuthenticator(mock(AtomicLong.class), (Cache) mock(Cache.class)); + RealmsAuthenticator realmsAuthenticator = new RealmsAuthenticator( + mock(AtomicLong.class), + (Cache) mock(Cache.class), + MeterRegistry.NOOP + ); final Authenticator.Context context = new Authenticator.Context( threadContext, mock(AuthenticationService.AuditableRequest.class),
From 293d0302fb5a32d2947e57cbfe54251c6f61a1a5 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Mon, 15 Jan 2024 17:13:56 +0100 Subject: [PATCH 34/95] Fix double registered metrics and chain getters throwing NPE #104306
Some of the metrics were accidentally registered under the same name and were logging errors in the apm agent logs (#103388). There were also NullPointerExceptions in the apm agent logs, so the chained getters were replaced with Optional-based lookups; the fix in #103520 did not help with the NPEs. A minimal sketch of the null-safe pattern is included below.
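A minimal sketch of the null-safe lookup this patch applies throughout NodeMetrics; the getter chain is taken from the patch, while the standalone method wrapper and its NodeStats parameter are illustrative only:

    // Any null link in the chain (refreshed stats, indices stats, get stats)
    // now yields 0L instead of throwing a NullPointerException.
    private long totalGetCount(NodeStats nodeStats) {
        return Optional.ofNullable(nodeStats)
            .map(o -> o.getIndices())
            .map(o -> o.getGet())
            .map(o -> o.getCount())
            .orElse(0L);
    }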
--- .../monitor/metrics/NodeMetrics.java | 325 +++++++++++++++--- 1 file changed, 269 insertions(+), 56 deletions(-)
diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java index 476ad516aab80..807844d983135 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java
@@ -24,6 +24,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Optional; /** * NodeMetrics monitors various statistics of an Elasticsearch node and exposes them as metrics through
@@ -65,7 +66,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.get.total", "Total number of get operations", "operation", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getCount()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getGet()) + .map(o -> o.getCount()) + .orElse(0L) + ) ) );
@@ -74,7 +81,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.get.time", "Time in milliseconds spent performing get operations.", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getTimeInMillis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getGet()) + .map(o -> o.getTimeInMillis()) + .orElse(0L) + ) ) );
@@ -83,7 +96,14 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.search.fetch.total", "Total number of fetch operations.", "operation", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchCount()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getSearch()) + .map(o -> o.getTotal()) + .map(o -> o.getFetchCount()) + .orElse(0L) + ) ) );
@@ -92,7 +112,14 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.search.fetch.time", "Time in milliseconds spent performing fetch operations.", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchTimeInMillis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getSearch()) + .map(o -> o.getTotal()) + .map(o -> o.getFetchTimeInMillis()) + .orElse(0L) + ) ) );
@@ -101,7 +128,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.merge.total", "Total number of merge operations.", "operation", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotal()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getMerge()) + .map(o -> o.getTotal()) + .orElse(0L) + ) ) );
@@ -110,7 +143,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.merge.time", "Time in milliseconds spent performing merge operations.", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotalTimeInMillis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) +
.map(o -> o.getIndices()) + .map(o -> o.getMerge()) + .map(o -> o.getTotalTimeInMillis()) + .orElse(0L) + ) ) ); @@ -119,7 +158,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.translog.operations.total", "Number of transaction log operations.", "operation", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().estimatedNumberOfOperations()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getTranslog()) + .map(o -> o.estimatedNumberOfOperations()) + .orElse(0) + ) ) ); @@ -128,7 +173,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.translog.size", "Size, in bytes, of the transaction log.", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getTranslogSizeInBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getTranslog()) + .map(o -> o.getTranslogSizeInBytes()) + .orElse(0L) + ) ) ); @@ -137,7 +188,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.translog.uncommitted_operations.total", "Number of uncommitted transaction log operations.", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedOperations()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getTranslog()) + .map(o -> o.getUncommittedOperations()) + .orElse(0) + ) ) ); @@ -146,7 +203,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.translog.uncommitted_operations.size", "Size, in bytes, of uncommitted transaction log operations.", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedSizeInBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getTranslog()) + .map(o -> o.getUncommittedSizeInBytes()) + .orElse(0L) + ) ) ); @@ -155,7 +218,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.translog.earliest_last_modified.time", "Earliest last modified age for the transaction log.", "time", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getEarliestLastModifiedAge()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getTranslog()) + .map(o -> o.getEarliestLastModifiedAge()) + .orElse(0L) + ) ) ); @@ -164,7 +233,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.transport.rx.size", "Size, in bytes, of RX packets received by the node during internal cluster communication.", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getRxSize().getBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getTransport()) + .map(o -> o.getRxSize()) + .map(o -> o.getBytes()) + .orElse(0L) + ) ) ); @@ -173,7 +248,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.transport.tx.size", "Size, in bytes, of TX packets sent by the node during internal cluster communication.", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getTxSize().getBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getTransport()) + .map(o -> o.getTxSize()) + .map(o -> o.getBytes()) + .orElse(0L) + ) ) ); @@ -182,7 +263,9 @@ private void 
registerAsyncMetrics(MeterRegistry registry) { "es.jvm.mem.pools.young.size", "Memory, in bytes, used by the young generation heap.", "bytes", - () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.YOUNG)) + () -> new LongWithAttributes( + bytesUsedByGCGen(Optional.ofNullable(stats.getOrRefresh()).map(o -> o.getJvm()).map(o -> o.getMem()), GcNames.YOUNG) + ) ) ); @@ -191,7 +274,9 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.jvm.mem.pools.survivor.size", "Memory, in bytes, used by the survivor space.", "bytes", - () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.SURVIVOR)) + () -> new LongWithAttributes( + bytesUsedByGCGen(Optional.ofNullable(stats.getOrRefresh()).map(o -> o.getJvm()).map(o -> o.getMem()), GcNames.SURVIVOR) + ) ) ); @@ -200,7 +285,9 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.jvm.mem.pools.old.size", "Memory, in bytes, used by the old generation heap.", "bytes", - () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.OLD)) + () -> new LongWithAttributes( + bytesUsedByGCGen(Optional.ofNullable(stats.getOrRefresh()).map(o -> o.getJvm()).map(o -> o.getMem()), GcNames.OLD) + ) ) ); @@ -209,7 +296,13 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.fs.io_stats.time.total", "The total time in millis spent performing I/O operations across all devices used by Elasticsearch.", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getFs().getIoStats().getTotalIOTimeMillis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getFs()) + .map(o -> o.getIoStats()) + .map(o -> o.getTotalIOTimeMillis()) + .orElse(0L) + ) ) ); @@ -218,61 +311,112 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.docs.total", "Total number of indexed documents", "documents", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCount()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getIndexCount()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indexing.docs.total", + "es.indexing.docs.current.total", "Current number of indexing documents", "documents", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCurrent()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getIndexCurrent()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongAsyncCounter( - "es.indices.indexing.failed.total", + "es.indexing.indexing.failed.total", "Total number of failed indexing operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexFailedCount()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getIndexFailedCount()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongAsyncCounter( - "es.indices.deletion.docs.total", + "es.indexing.deletion.docs.total", "Total number of deleted documents", "documents", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCount()) + 
() -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getDeleteCount()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indices.deletion.docs.total", + "es.indexing.deletion.docs.current.total", "Current number of deleting documents", "documents", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCurrent()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getDeleteCurrent()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongAsyncCounter( - "es.indices.indexing.time", + "es.indexing.time", "Total indices indexing time", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexTime().millis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getIndexTime()) + .map(o -> o.millis()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongAsyncCounter( - "es.indices.deletion.time", + "es.deletion.time", "Total indices deletion time", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteTime().millis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getDeleteTime()) + .map(o -> o.millis()) + .orElse(0L) + ) ) ); @@ -281,7 +425,15 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.throttle.time", "Total indices throttle time", "milliseconds", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getThrottleTime().millis()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getThrottleTime()) + .map(o -> o.millis()) + .orElse(0L) + ) ) ); @@ -290,7 +442,14 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indices.noop.total", "Total number of noop shard operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getNoopUpdateCount()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndices()) + .map(o -> o.getIndexing()) + .map(o -> o.getTotal()) + .map(o -> o.getNoopUpdateCount()) + .orElse(0L) + ) ) ); @@ -299,7 +458,12 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.coordinating_operations.size", "Total number of memory bytes consumed by coordinating operations", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getTotalCoordinatingBytes()) + .orElse(0L) + ) ) ); @@ -308,25 +472,40 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.coordinating_operations.total", "Total number of coordinating operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingOps()) + () -> new LongWithAttributes( + 
Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getTotalCoordinatingOps()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indexing.coordinating_operations.size", + "es.indexing.coordinating_operations.current.size", "Current number of memory bytes consumed by coordinating operations", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getCurrentCoordinatingBytes()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indexing.coordinating_operations.total", + "es.indexing.coordinating_operations.current.total", "Current number of coordinating operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingOps()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getCurrentCoordinatingOps()) + .orElse(0L) + ) ) ); @@ -335,7 +514,12 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.coordinating_operations.rejections.total", "Total number of coordinating operations rejections", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCoordinatingRejections()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getCoordinatingRejections()) + .orElse(0L) + ) ) ); @@ -344,7 +528,12 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.primary_operations.size", "Total number of memory bytes consumed by primary operations", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getTotalPrimaryBytes()) + .orElse(0L) + ) ) ); @@ -353,25 +542,40 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.primary_operations.total", "Total number of primary operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryOps()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getTotalPrimaryOps()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indexing.primary_operations.size", + "es.indexing.primary_operations.current.size", "Current number of memory bytes consumed by primary operations", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentPrimaryBytes()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getCurrentPrimaryBytes()) + .orElse(0L) + ) ) ); metrics.add( registry.registerLongGauge( - "es.indexing.primary_operations.total", + "es.indexing.primary_operations.current.total", "Current number of primary operations", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentPrimaryOps()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getCurrentPrimaryOps()) + .orElse(0L) + ) ) ); @@ -380,7 
+584,12 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.primary_operations.rejections.total", "Total number of primary operations rejections", "operations", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getPrimaryRejections()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()) + .map(o -> o.getIndexingPressureStats()) + .map(o -> o.getPrimaryRejections()) + .orElse(0L) + ) ) );
@@ -389,7 +598,9 @@ private void registerAsyncMetrics(MeterRegistry registry) { "es.indexing.memory.limit.size", "Current memory limit for primary and coordinating operations", "bytes", - () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getMemoryLimit()) + () -> new LongWithAttributes( + Optional.ofNullable(stats.getOrRefresh()).map(o -> o.getIndexingPressureStats()).map(o -> o.getMemoryLimit()).orElse(0L) + ) ) );
@@ -398,18 +609,20 @@ private void registerAsyncMetrics(MeterRegistry registry) { /** * Retrieves the bytes used by a specific garbage collection generation from the provided JvmStats.Mem. * - * @param mem The JvmStats.Mem containing memory pool information. - * @param name The name of the garbage collection generation (e.g., "young", "survivor", "old"). + * @param optionalMem The JvmStats.Mem containing memory pool information. + * @param name The name of the garbage collection generation (e.g., "young", "survivor", "old"). * @return The number of bytes used by the specified garbage collection generation. */ - private long bytesUsedByGCGen(JvmStats.Mem mem, String name) { - long bytesUsed = 0; - for (JvmStats.MemoryPool pool : mem) { - if (pool.getName().equals(name)) { - bytesUsed = pool.getUsed().getBytes(); + private long bytesUsedByGCGen(Optional optionalMem, String name) { + return optionalMem.map(mem -> { + long bytesUsed = 0; + for (JvmStats.MemoryPool pool : mem) { + if (pool.getName().equals(name)) { + bytesUsed = pool.getUsed().getBytes(); + } } - } - return bytesUsed; + return bytesUsed; + }).orElse(0L); } /**
From a0cf690be8aa3b10d0767d7941259cc1ce0db223 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 16 Jan 2024 10:12:46 +1100 Subject: [PATCH 35/95] Retry indefinitely for s3 indices blob read errors (#103300)
This PR makes the s3 readBlob retry indefinitely on either opening or reading errors when the operation purpose is Indices. It performs retries with no delay within the configured number of max retries. Beyond that, it retries with an increased delay each time, with a capped maximum of 10 seconds. The resulting delay schedule is sketched below.
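As a worked illustration of that schedule, the shift-with-cap expression below is the one this patch adds in getRetryDelayInMillis; the static method wrapper is illustrative only. Attempt 1 waits 10ms, attempt 2 waits 20ms, doubling each time until attempt 11 and beyond wait 10 * 1024 = 10,240ms, i.e. roughly 10 seconds:

    // 10ms, 20ms, 40ms, ... capped at 10,240ms (~10s) from attempt 11 onwards
    static long retryDelayInMillis(int attempt) {
        return 10L << Math.min(attempt - 1, 10);
    }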
Relates: ES-6453 --- docs/changelog/103300.yaml | 5 + .../s3/S3RetryingInputStream.java | 164 +++++++++----- .../repositories/s3/S3Service.java | 7 +- .../s3/S3BlobContainerRetriesTests.java | 202 ++++++++++++++++-- .../AbstractBlobContainerRetriesTestCase.java | 18 +- ...ESMockAPIBasedRepositoryIntegTestCase.java | 2 +- 6 files changed, 321 insertions(+), 77 deletions(-) create mode 100644 docs/changelog/103300.yaml diff --git a/docs/changelog/103300.yaml b/docs/changelog/103300.yaml new file mode 100644 index 0000000000000..a536a673b7827 --- /dev/null +++ b/docs/changelog/103300.yaml @@ -0,0 +1,5 @@ +pr: 103300 +summary: Retry indefinitely for s3 indices blob read errors +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java index 04eadba9f9f8f..93342079c60d1 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java @@ -14,9 +14,9 @@ import com.amazonaws.services.s3.model.S3Object; import com.amazonaws.services.s3.model.S3ObjectInputStream; +import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.Version; import org.elasticsearch.common.blobstore.OperationPurpose; import org.elasticsearch.core.IOUtils; @@ -48,7 +48,7 @@ class S3RetryingInputStream extends InputStream { private final String blobKey; private final long start; private final long end; - private final List failures; + private final List failures; private S3ObjectInputStream currentStream; private long currentStreamFirstOffset; @@ -77,29 +77,34 @@ class S3RetryingInputStream extends InputStream { this.failures = new ArrayList<>(MAX_SUPPRESSED_EXCEPTIONS); this.start = start; this.end = end; - openStream(); + openStreamWithRetry(); } - private void openStream() throws IOException { - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey); - getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose)); - if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { - assert start + currentOffset <= end - : "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; - getObjectRequest.setRange(Math.addExact(start, currentOffset), end); - } - final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); - this.currentStreamFirstOffset = Math.addExact(start, currentOffset); - this.currentStreamLastOffset = Math.addExact(currentStreamFirstOffset, getStreamLength(s3Object)); - this.currentStream = s3Object.getObjectContent(); - } catch (final AmazonClientException e) { - if (e instanceof AmazonS3Exception amazonS3Exception) { - if (404 == amazonS3Exception.getStatusCode()) { - throw addSuppressedExceptions(new NoSuchFileException("Blob object [" + blobKey + "] not found: " + e.getMessage())); + private void openStreamWithRetry() throws IOException { + while (true) { + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + final GetObjectRequest getObjectRequest = new 
GetObjectRequest(blobStore.bucket(), blobKey); + getObjectRequest.setRequestMetricCollector(blobStore.getMetricCollector(Operation.GET_OBJECT, purpose)); + if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) { + assert start + currentOffset <= end + : "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end; + getObjectRequest.setRange(Math.addExact(start, currentOffset), end); + } + final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest)); + this.currentStreamFirstOffset = Math.addExact(start, currentOffset); + this.currentStreamLastOffset = Math.addExact(currentStreamFirstOffset, getStreamLength(s3Object)); + this.currentStream = s3Object.getObjectContent(); + return; + } catch (AmazonClientException e) { + if (e instanceof AmazonS3Exception amazonS3Exception && 404 == amazonS3Exception.getStatusCode()) { + throw addSuppressedExceptions( + new NoSuchFileException("Blob object [" + blobKey + "] not found: " + amazonS3Exception.getMessage()) + ); } + + final long delayInMillis = maybeLogAndComputeRetryDelay("opening", e); + delayBeforeRetry(delayInMillis); } - throw addSuppressedExceptions(e); } } @@ -166,45 +171,104 @@ private void ensureOpen() { } private void reopenStreamOrFail(IOException e) throws IOException { - if (purpose == OperationPurpose.REPOSITORY_ANALYSIS) { - logger.warn(() -> format(""" - failed reading [%s/%s] at offset [%s]""", blobStore.bucket(), blobKey, start + currentOffset), e); - throw e; + final long delayInMillis = maybeLogAndComputeRetryDelay("reading", e); + maybeAbort(currentStream); + IOUtils.closeWhileHandlingException(currentStream); + + delayBeforeRetry(delayInMillis); + openStreamWithRetry(); + } + + // The method throws if the operation should *not* be retried. Otherwise, it keeps a record for the attempt and associated failure + // and compute the delay before retry. + private long maybeLogAndComputeRetryDelay(String action, T e) throws T { + if (shouldRetry(attempt) == false) { + final var finalException = addSuppressedExceptions(e); + logForFailure(action, finalException); + throw finalException; } - final int maxAttempts = blobStore.getMaxRetries() + 1; + // Log at info level for the 1st retry and every ~5 minutes afterward + logForRetry((attempt == 1 || attempt % 30 == 0) ? 
Level.INFO : Level.DEBUG, action, e); + if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) { + failures.add(e); + } + final long delayInMillis = getRetryDelayInMillis(); + attempt += 1; // increment after computing delay because attempt affects the result + return delayInMillis; + } + + private void logForFailure(String action, Exception e) { + logger.warn( + () -> format( + "failed %s [%s/%s] at offset [%s] with purpose [%s]", + action, + blobStore.bucket(), + blobKey, + start + currentOffset, + purpose.getKey() + ), + e + ); + } + private void logForRetry(Level level, String action, Exception e) { final long meaningfulProgressSize = Math.max(1L, blobStore.bufferSizeInBytes() / 100L); final long currentStreamProgress = Math.subtractExact(Math.addExact(start, currentOffset), currentStreamFirstOffset); if (currentStreamProgress >= meaningfulProgressSize) { failuresAfterMeaningfulProgress += 1; } - final Supplier messageSupplier = () -> format( - """ - failed reading [%s/%s] at offset [%s]; this was attempt [%s] to read this blob which yielded [%s] bytes; in total \ - [%s] of the attempts to read this blob have made meaningful progress and do not count towards the maximum number of \ - retries; the maximum number of read attempts which do not make meaningful progress is [%s]""", - blobStore.bucket(), - blobKey, - start + currentOffset, - attempt, - currentStreamProgress, - failuresAfterMeaningfulProgress, - maxAttempts + logger.log( + level, + () -> format( + """ + failed %s [%s/%s] at offset [%s] with purpose [%s]; \ + this was attempt [%s] to read this blob which yielded [%s] bytes; in total \ + [%s] of the attempts to read this blob have made meaningful progress and do not count towards the maximum number of \ + retries; the maximum number of read attempts which do not make meaningful progress is [%s]""", + action, + blobStore.bucket(), + blobKey, + start + currentOffset, + purpose.getKey(), + attempt, + currentStreamProgress, + failuresAfterMeaningfulProgress, + maxRetriesForNoMeaningfulProgress() + ), + e ); - if (attempt >= maxAttempts + failuresAfterMeaningfulProgress) { - final var finalException = addSuppressedExceptions(e); - logger.warn(messageSupplier, finalException); - throw finalException; + } + + private boolean shouldRetry(int attempt) { + if (purpose == OperationPurpose.REPOSITORY_ANALYSIS) { + return false; } - logger.debug(messageSupplier, e); - attempt += 1; - if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) { - failures.add(e); + if (purpose == OperationPurpose.INDICES) { + return true; } - maybeAbort(currentStream); - IOUtils.closeWhileHandlingException(currentStream); - openStream(); + final int maxAttempts = blobStore.getMaxRetries() + 1; + return attempt < maxAttempts + failuresAfterMeaningfulProgress; + } + + private int maxRetriesForNoMeaningfulProgress() { + return purpose == OperationPurpose.INDICES ? Integer.MAX_VALUE : (blobStore.getMaxRetries() + 1); + } + + private void delayBeforeRetry(long delayInMillis) { + try { + assert shouldRetry(attempt - 1) : "should not have retried"; + Thread.sleep(delayInMillis); + } catch (InterruptedException e) { + logger.info("s3 input stream delay interrupted", e); + Thread.currentThread().interrupt(); + } + } + + // protected access for testing + protected long getRetryDelayInMillis() { + // Initial delay is 10 ms and cap max delay at 10 * 1024 millis, i.e. 
it retries every ~10 seconds at a minimum + return 10L << (Math.min(attempt - 1, 10)); } @Override @@ -247,7 +311,7 @@ public void reset() { } private T addSuppressedExceptions(T e) { - for (IOException failure : failures) { + for (Exception failure : failures) { e.addSuppressed(failure); } return e; diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 195a18891ebd0..1fd31047c735a 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -178,6 +178,11 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { // proxy for testing AmazonS3 buildClient(final S3ClientSettings clientSettings) { + final AmazonS3ClientBuilder builder = buildClientBuilder(clientSettings); + return SocketAccess.doPrivileged(builder::build); + } + + protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); builder.withCredentials(buildCredentials(LOGGER, clientSettings, webIdentityTokenCredentialsProvider)); builder.withClientConfiguration(buildConfiguration(clientSettings)); @@ -206,7 +211,7 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { if (clientSettings.disableChunkedEncoding) { builder.disableChunkedEncoding(); } - return SocketAccess.doPrivileged(builder::build); + return builder; } // pkg private for tests diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 8f273bcad3cf5..34e14dc718818 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -7,7 +7,9 @@ */ package org.elasticsearch.repositories.s3; +import com.amazonaws.DnsResolver; import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream; import com.amazonaws.util.Base16; import com.sun.net.httpserver.HttpExchange; @@ -50,10 +52,14 @@ import java.io.InputStream; import java.net.InetSocketAddress; import java.net.SocketTimeoutException; +import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.nio.file.NoSuchFileException; import java.util.Arrays; import java.util.Locale; import java.util.Objects; +import java.util.OptionalInt; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; @@ -67,6 +73,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -80,10 +87,25 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTestCase { private S3Service service; + private AtomicBoolean shouldErrorOnDns; @Before public void setUp() throws Exception { - service = new 
S3Service(Mockito.mock(Environment.class), Settings.EMPTY); + shouldErrorOnDns = new AtomicBoolean(false); + service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY) { + @Override + protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { + final AmazonS3ClientBuilder builder = super.buildClientBuilder(clientSettings); + final DnsResolver defaultDnsResolver = builder.getClientConfiguration().getDnsResolver(); + builder.getClientConfiguration().setDnsResolver(host -> { + if (shouldErrorOnDns.get() && randomBoolean() && randomBoolean()) { + throw new UnknownHostException(host); + } + return defaultDnsResolver.resolve(host); + }); + return builder; + } + }; super.setUp(); } @@ -150,29 +172,51 @@ protected BlobContainer createBlobContainer( Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build() ); - return new S3BlobContainer( - randomBoolean() ? BlobPath.EMPTY : BlobPath.EMPTY.add("foo"), - new S3BlobStore( - service, - "bucket", - S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), - bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, - S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), - S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), - repositoryMetadata, - BigArrays.NON_RECYCLING_INSTANCE, - new DeterministicTaskQueue().getThreadPool(), - RepositoriesMetrics.NOOP - ) - ) { + final S3BlobStore s3BlobStore = new S3BlobStore( + service, + "bucket", + S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), + bufferSize == null ? S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, + S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), + S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + repositoryMetadata, + BigArrays.NON_RECYCLING_INSTANCE, + new DeterministicTaskQueue().getThreadPool(), + RepositoriesMetrics.NOOP + ); + return new S3BlobContainer(randomBoolean() ? 
BlobPath.EMPTY : BlobPath.EMPTY.add("foo"), s3BlobStore) { @Override public InputStream readBlob(OperationPurpose purpose, String blobName) throws IOException { - return new AssertingInputStream(super.readBlob(purpose, blobName), blobName); + return new AssertingInputStream(new S3RetryingInputStream(purpose, s3BlobStore, buildKey(blobName)) { + @Override + protected long getRetryDelayInMillis() { + assert super.getRetryDelayInMillis() > 0; + return 0; + } + }, blobName); } @Override public InputStream readBlob(OperationPurpose purpose, String blobName, long position, long length) throws IOException { - return new AssertingInputStream(super.readBlob(purpose, blobName, position, length), blobName, position, length); + final InputStream inputStream; + if (length == 0) { + inputStream = new ByteArrayInputStream(new byte[0]); + } else { + inputStream = new S3RetryingInputStream( + purpose, + s3BlobStore, + buildKey(blobName), + position, + Math.addExact(position, length - 1) + ) { + @Override + protected long getRetryDelayInMillis() { + assert super.getRetryDelayInMillis() > 0; + return 0; + } + }; + } + return new AssertingInputStream(inputStream, blobName, position, length); } }; } @@ -574,6 +618,118 @@ public void handle(HttpExchange exchange) throws IOException { }); } + public void testReadWithIndicesPurposeRetriesForever() throws IOException { + final int maxRetries = between(0, 5); + final int totalFailures = Math.max(30, maxRetries * between(30, 80)); + final int bufferSizeBytes = scaledRandomIntBetween( + 0, + randomFrom(1000, Math.toIntExact(S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY).getBytes())) + ); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, ByteSizeValue.ofBytes(bufferSizeBytes)); + final int meaningfulProgressBytes = Math.max(1, bufferSizeBytes / 100); + + final byte[] bytes = randomBlobContent(512); + + shouldErrorOnDns.set(true); + final AtomicInteger failures = new AtomicInteger(); + @SuppressForbidden(reason = "use a http server") + class FlakyReadHandler implements HttpHandler { + + @Override + public void handle(HttpExchange exchange) throws IOException { + Streams.readFully(exchange.getRequestBody()); + if (failures.get() > totalFailures && randomBoolean()) { + final int rangeStart = getRangeStart(exchange); + assertThat(rangeStart, lessThan(bytes.length)); + exchange.getResponseHeaders().add("Content-Type", bytesContentType()); + final OptionalInt rangeEnd = getRangeEnd(exchange); + final int length; + if (rangeEnd.isPresent() == false) { + final var remainderLength = bytes.length - rangeStart; + exchange.sendResponseHeaders(HttpStatus.SC_OK, remainderLength); + length = remainderLength < meaningfulProgressBytes + ? 
remainderLength + : between(meaningfulProgressBytes, remainderLength); + } else { + final int effectiveRangeEnd = Math.min(bytes.length - 1, rangeEnd.getAsInt()); + length = (effectiveRangeEnd - rangeStart) + 1; + exchange.sendResponseHeaders(HttpStatus.SC_OK, length); + } + exchange.getResponseBody().write(bytes, rangeStart, length); + } else { + failures.incrementAndGet(); + if (randomBoolean()) { + exchange.sendResponseHeaders( + randomFrom( + HttpStatus.SC_INTERNAL_SERVER_ERROR, + HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, + HttpStatus.SC_GATEWAY_TIMEOUT + ), + -1 + ); + } else { + if (randomBoolean()) { + final var bytesSent = sendIncompleteContent(exchange, bytes); + if (bytesSent >= meaningfulProgressBytes) { + exchange.getResponseBody().flush(); + } + } + } + } + exchange.close(); + } + } + + httpServer.createContext(downloadStorageEndpoint(blobContainer, "read_blob_retries_forever"), new FlakyReadHandler()); + + // Ranged read + final int position = between(0, bytes.length - 1); + final int length = between(0, randomBoolean() ? bytes.length : Integer.MAX_VALUE); + logger.info("--> position={}, length={}", position, length); + try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.INDICES, "read_blob_retries_forever", position, length)) { + final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); + assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + length)), bytesRead); + } + assertThat(failures.get(), greaterThan(totalFailures)); + + // Read the whole blob + failures.set(0); + try (InputStream inputStream = blobContainer.readBlob(OperationPurpose.INDICES, "read_blob_retries_forever")) { + final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); + assertArrayEquals(bytes, bytesRead); + } + assertThat(failures.get(), greaterThan(totalFailures)); + } + + public void testDoesNotRetryOnNotFound() { + final int maxRetries = between(3, 5); + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); + + final AtomicInteger numberOfReads = new AtomicInteger(0); + @SuppressForbidden(reason = "use a http server") + class NotFoundReadHandler implements HttpHandler { + @Override + public void handle(HttpExchange exchange) throws IOException { + numberOfReads.incrementAndGet(); + exchange.sendResponseHeaders(HttpStatus.SC_NOT_FOUND, -1); + exchange.close(); + } + } + + httpServer.createContext(downloadStorageEndpoint(blobContainer, "read_blob_not_found"), new NotFoundReadHandler()); + expectThrows(NoSuchFileException.class, () -> { + try ( + InputStream inputStream = randomBoolean() + ? 
blobContainer.readBlob(randomRetryingPurpose(), "read_blob_not_found") + : blobContainer.readBlob(randomRetryingPurpose(), "read_blob_not_found", between(0, 100), between(1, 100)) + ) { + Streams.readFully(inputStream); + } + }); + assertThat(numberOfReads.get(), equalTo(1)); + } + @Override protected Matcher getMaxRetriesMatcher(int maxRetries) { // some attempts make meaningful progress and do not count towards the max retry limit @@ -585,6 +741,14 @@ protected OperationPurpose randomRetryingPurpose() { return randomValueOtherThan(OperationPurpose.REPOSITORY_ANALYSIS, BlobStoreTestUtil::randomPurpose); } + @Override + protected OperationPurpose randomFiniteRetryingPurpose() { + return randomValueOtherThanMany( + purpose -> purpose == OperationPurpose.REPOSITORY_ANALYSIS || purpose == OperationPurpose.INDICES, + BlobStoreTestUtil::randomPurpose + ); + } + /** * Asserts that an InputStream is fully consumed, or aborted, when it is closed */ @@ -605,6 +769,8 @@ private static class AssertingInputStream extends FilterInputStream { AssertingInputStream(InputStream in, String blobName, long position, long length) { super(in); + assert position >= 0L; + assert length >= 0; this.blobName = blobName; this.position = position; this.length = length; diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index 8d44c37fcd9f1..01e21c929e654 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -253,7 +253,7 @@ public void testReadBlobWithReadTimeouts() { Exception exception = expectThrows( unresponsiveExceptionType(), - () -> Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_unresponsive")) + () -> Streams.readFully(blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_unresponsive")) ); assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class)); @@ -270,8 +270,8 @@ public void testReadBlobWithReadTimeouts() { exception = expectThrows(Exception.class, () -> { try ( InputStream stream = randomBoolean() - ? blobContainer.readBlob(randomRetryingPurpose(), "read_blob_incomplete") - : blobContainer.readBlob(randomRetryingPurpose(), "read_blob_incomplete", position, length) + ? 
blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_incomplete") + : blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_incomplete", position, length) ) { Streams.readFully(stream); } @@ -294,6 +294,10 @@ protected OperationPurpose randomRetryingPurpose() { return randomPurpose(); } + protected OperationPurpose randomFiniteRetryingPurpose() { + return randomPurpose(); + } + public void testReadBlobWithNoHttpResponse() { final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 200)); final BlobContainer blobContainer = createBlobContainer(randomInt(5), readTimeout, null, null); @@ -303,9 +307,9 @@ public void testReadBlobWithNoHttpResponse() { Exception exception = expectThrows(unresponsiveExceptionType(), () -> { if (randomBoolean()) { - Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_no_response")); + Streams.readFully(blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_no_response")); } else { - Streams.readFully(blobContainer.readBlob(randomPurpose(), "read_blob_no_response", 0, 1)); + Streams.readFully(blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_no_response", 0, 1)); } }); assertThat( @@ -328,8 +332,8 @@ public void testReadBlobWithPrematureConnectionClose() { final Exception exception = expectThrows(Exception.class, () -> { try ( InputStream stream = randomBoolean() - ? blobContainer.readBlob(randomRetryingPurpose(), "read_blob_incomplete", 0, 1) - : blobContainer.readBlob(randomRetryingPurpose(), "read_blob_incomplete") + ? blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_incomplete", 0, 1) + : blobContainer.readBlob(randomFiniteRetryingPurpose(), "read_blob_incomplete") ) { Streams.readFully(stream); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index 1e66dd061d9b5..2a1cba66f79f9 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -254,7 +254,7 @@ protected static String serverUrl() { /** * Consumes and closes the given {@link InputStream} */ - protected static void drainInputStream(final InputStream inputStream) throws IOException { + public static void drainInputStream(final InputStream inputStream) throws IOException { while (inputStream.read(BUFFER) >= 0) ; } From c4c2ce83cb78896935e9402d3a782dba947686d9 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Tue, 16 Jan 2024 10:27:33 +0200 Subject: [PATCH 36/95] Downsampling supports date_histogram with tz (#103511) * Downsampling supports date_histogram with tz This comes with caveats for downsampled indexes at intervals longer than 15 minutes. For instance, - 1-hour downsampling will produce inaccurate results for 1-hour histograms on timezones shifted by XX:30 - 1-day downsampling will produce inaccurate daily histograms for non-UTC timezones as it tracks days at UTC. 
Related to #101309 * Update docs/changelog/103511.yaml * test daylight savings * update documentation * Offset time buckets over downsampled data with TZ * Update docs/changelog/103511.yaml * check for TSDS * fixme for transport version * add interval to index metadata * add transport version * bump up transport version * address feedback * spotless fix --- docs/changelog/103511.yaml | 6 + .../data-streams/downsampling.asciidoc | 24 +- .../org/elasticsearch/TransportVersions.java | 1 + .../cluster/metadata/IndexMetadata.java | 21 +- .../common/settings/IndexScopedSettings.java | 1 + .../index/query/QueryRewriteContext.java | 7 + .../DateHistogramAggregationBuilder.java | 58 +- .../DateHistogramAggregationSupplier.java | 1 + .../histogram/DateHistogramAggregator.java | 14 + .../DateHistogramAggregatorFactory.java | 7 + .../DateRangeHistogramAggregator.java | 5 + .../histogram/InternalDateHistogram.java | 46 +- .../support/AggregationContext.java | 14 + .../histogram/InternalDateHistogramTests.java | 5 +- .../test/downsample/30_date_histogram.yml | 516 +++++++++++++++++- ...StreamLifecycleDownsampleDisruptionIT.java | 1 + .../downsample/TransportDownsampleAction.java | 3 + .../xpack/ilm/actions/DownsampleActionIT.java | 5 + 18 files changed, 679 insertions(+), 56 deletions(-) create mode 100644 docs/changelog/103511.yaml diff --git a/docs/changelog/103511.yaml b/docs/changelog/103511.yaml new file mode 100644 index 0000000000000..20a48df914832 --- /dev/null +++ b/docs/changelog/103511.yaml @@ -0,0 +1,6 @@ +pr: 103511 +summary: Downsampling supports `date_histogram` with tz +area: Downsampling +type: bug +issues: + - 101309 diff --git a/docs/reference/data-streams/downsampling.asciidoc b/docs/reference/data-streams/downsampling.asciidoc index 5e31a90bfb959..cac73787fc018 100644 --- a/docs/reference/data-streams/downsampling.asciidoc +++ b/docs/reference/data-streams/downsampling.asciidoc @@ -135,7 +135,29 @@ downsampled. * For <>, only `fixed_intervals` (and not calendar-aware intervals) are supported. -* Only Coordinated Universal Time (UTC) date-times are supported. +* Timezone support comes with caveats: + +** Date histograms at intervals that are multiples of an hour are based on +values generated at UTC. This works well for timezones that are on the hour, e.g. ++5:00 or -3:00, but requires offsetting the reported time buckets, e.g. +`2020-03-07T10:30:00.000` instead of `2020-03-07T10:00:00.000` for +timezone +5:30 (India), if downsampling aggregates values per hour. In this case, +the results include the field `downsampled_results_offset: true`, to indicate that +the time buckets are shifted. This can be avoided if a downsampling interval of 15 +minutes is used, as it allows properly calculating hourly values for the shifted +buckets. + +** Date histograms at intervals that are multiples of a day are similarly +affected, in case downsampling aggregates values per day. In this case, the +beginning of each day is always calculated at UTC when generating the downsampled +values, so the time buckets need to be shifted, e.g. reported as +`2020-03-07T19:00:00.000` instead of `2020-03-07T00:00:00.000` for timezone `America/New_York`. +The field `downsampled_results_offset: true` is added in this case too. + +** Daylight savings and similar peculiarities around timezones affect +reported results, as <> +for date histogram aggregation. Besides, downsampling at a daily interval +hinders tracking any information related to daylight savings changes. 
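For illustration, an hourly histogram with a half-hour timezone over data downsampled at a 1-hour interval could look like this (the index and aggregation names here are hypothetical):

GET /my-downsampled-index/_search
{
  "size": 0,
  "aggs": {
    "per_hour": {
      "date_histogram": {
        "field": "@timestamp",
        "fixed_interval": "1h",
        "time_zone": "+05:30"
      }
    }
  }
}

Because the +05:30 offset is not a multiple of the 1-hour downsampling interval, the response reports bucket keys shifted to XX:30 local time and the `per_hour` aggregation carries `"downsampled_results_offset": true`, matching the behaviour exercised by the YAML tests further down in this patch.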
[discrete] [[downsampling-restrictions]] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5b2819f04ec24..fc43d47f29471 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -183,6 +183,7 @@ static TransportVersion def(int id) { public static final TransportVersion HOT_THREADS_AS_BYTES = def(8_571_00_0); public static final TransportVersion ML_INFERENCE_REQUEST_INPUT_TYPE_ADDED = def(8_572_00_0); public static final TransportVersion ESQL_ENRICH_POLICY_CCQ_MODE = def(8_573_00_0); + public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_574_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 742b52365c8d7..83b1c48e69eb9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -26,7 +26,6 @@ import org.elasticsearch.cluster.routing.allocation.IndexMetadataUpdater; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamInput; @@ -138,14 +137,9 @@ public class IndexMetadata implements Diffable, ToXContentFragmen EnumSet.of(ClusterBlockLevel.WRITE) ); - // TODO: refactor this method after adding more downsampling metadata - public boolean isDownsampledIndex() { - final String sourceIndex = settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY); - final String indexDownsamplingStatus = settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS_KEY); - final boolean downsamplingSuccess = DownsampleTaskStatus.SUCCESS.name() - .toLowerCase(Locale.ROOT) - .equals(indexDownsamplingStatus != null ? 
indexDownsamplingStatus.toLowerCase(Locale.ROOT) : DownsampleTaskStatus.UNKNOWN); - return Strings.isNullOrEmpty(sourceIndex) == false && downsamplingSuccess; + @Nullable + public String getDownsamplingInterval() { + return settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL_KEY); } public enum State implements Writeable { @@ -1235,6 +1229,7 @@ public Index getResizeSourceIndex() { public static final String INDEX_DOWNSAMPLE_ORIGIN_UUID_KEY = "index.downsample.origin.uuid"; public static final String INDEX_DOWNSAMPLE_STATUS_KEY = "index.downsample.status"; + public static final String INDEX_DOWNSAMPLE_INTERVAL_KEY = "index.downsample.interval"; public static final Setting INDEX_DOWNSAMPLE_SOURCE_UUID = Setting.simpleString( INDEX_DOWNSAMPLE_SOURCE_UUID_KEY, Property.IndexScope, @@ -1277,6 +1272,14 @@ public String toString() { Property.InternalIndex ); + public static final Setting INDEX_DOWNSAMPLE_INTERVAL = Setting.simpleString( + INDEX_DOWNSAMPLE_INTERVAL_KEY, + "", + Property.IndexScope, + Property.InternalIndex, + Property.PrivateIndex + ); + // LIFECYCLE_NAME is here an as optimization, see LifecycleSettings.LIFECYCLE_NAME and // LifecycleSettings.LIFECYCLE_NAME_SETTING for the 'real' version public static final String LIFECYCLE_NAME = "index.lifecycle.name"; diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index f7c9e72d36326..c1b8d51c255db 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -76,6 +76,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME, IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID, IndexMetadata.INDEX_DOWNSAMPLE_STATUS, + IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 30a85f4941105..9a8800c05bdb2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -285,6 +285,13 @@ public IndexSettings getIndexSettings() { return indexSettings; } + /** + * Returns the MappingLookup for the queried index. + */ + public MappingLookup getMappingLookup() { + return mappingLookup; + } + /** * Given an index pattern, checks whether it matches against the current shard. The pattern * may represent a fully qualified index name if the search targets remote shards. 
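The heart of the DateHistogramAggregationBuilder change that follows is a small piece of modular arithmetic: a timezone forces shifted buckets over downsampled data exactly when its UTC offset does not fall on the UTC-based downsampling grid. Below is a minimal standalone sketch of that check; the class name and sample values are hypothetical, and the real code additionally evaluates region-based zones such as America/New_York at a reference instant, which a fixed ZoneOffset sidesteps.

import java.time.ZoneOffset;
import java.util.concurrent.TimeUnit;

public class DownsampleOffsetSketch {

    // Remainder of the timezone offset on the UTC-based downsampling grid: a non-zero
    // result means the requested buckets cannot align with the pre-aggregated values,
    // so the reported bucket keys must be shifted (downsampled_results_offset: true).
    static long bucketShiftMillis(ZoneOffset tz, long downsampleIntervalMillis) {
        long tzOffsetMillis = TimeUnit.SECONDS.toMillis(tz.getTotalSeconds());
        return tzOffsetMillis % downsampleIntervalMillis;
    }

    public static void main(String[] args) {
        long oneHour = TimeUnit.HOURS.toMillis(1);
        long fifteenMinutes = TimeUnit.MINUTES.toMillis(15);
        // +05:30 over 1h-downsampled data: 30-minute shift, buckets reported at XX:30.
        System.out.println(bucketShiftMillis(ZoneOffset.of("+05:30"), oneHour)); // 1800000
        // +05:30 over 15m-downsampled data: zero shift, hourly buckets align exactly.
        System.out.println(bucketShiftMillis(ZoneOffset.of("+05:30"), fifteenMinutes)); // 0
    }
}

This is also why the documentation singles out a 15-minute downsampling interval: present-day timezone offsets are all multiples of 15 minutes, so every one of them leaves a zero remainder on a 15-minute grid.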
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 4f94e2061caa1..c164067ea6504 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -36,6 +37,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.SimpleTimeZone; import java.util.function.Consumer; import static java.util.Map.entry; @@ -406,23 +408,46 @@ protected ValuesSourceAggregatorFactory innerBuild( ) throws IOException { final DateIntervalWrapper.IntervalTypeEnum dateHistogramIntervalType = dateHistogramInterval.getIntervalType(); - if (context.getIndexSettings().getIndexMetadata().isDownsampledIndex() - && DateIntervalWrapper.IntervalTypeEnum.CALENDAR.equals(dateHistogramIntervalType)) { - throw new IllegalArgumentException( - config.getDescription() - + " is not supported for aggregation [" - + getName() - + "] with interval type [" - + dateHistogramIntervalType.getPreferredName() - + "]" - ); - } - + boolean downsampledResultsOffset = false; final ZoneId tz = timeZone(); - if (context.getIndexSettings().getIndexMetadata().isDownsampledIndex() && tz != null && ZoneId.of("UTC").equals(tz) == false) { - throw new IllegalArgumentException( - config.getDescription() + " is not supported for aggregation [" + getName() + "] with timezone [" + tz + "]" - ); + + String downsamplingInterval = context.getIndexSettings().getIndexMetadata().getDownsamplingInterval(); + if (downsamplingInterval != null) { + if (DateIntervalWrapper.IntervalTypeEnum.CALENDAR.equals(dateHistogramIntervalType)) { + throw new IllegalArgumentException( + config.getDescription() + + " is not supported for aggregation [" + + getName() + + "] with interval type [" + + dateHistogramIntervalType.getPreferredName() + + "]" + ); + } + + // Downsampled data in time-series indexes contain aggregated values that get calculated over UTC-based intervals. + // When they get aggregated using a different timezone, the resulting buckets may need to be offset to account for + // the difference between UTC (where stored data refers to) and the requested timezone. For instance: + // a. A TZ shifted by -01:15 over hourly downsampled data will lead to buckets with times XX:45, instead of XX:00 + // b. A TZ shifted by +07:00 over daily downsampled data will lead to buckets with times 07:00, instead of 00:00 + // c. Intervals over DST are approximate, not including gaps in time buckets. This applies to date histogram aggregation in + // general. + if (tz != null && ZoneId.of("UTC").equals(tz) == false && field().equals(DataStreamTimestampFieldMapper.DEFAULT_PATH)) { + + // Get the downsampling interval. 
+ DateHistogramInterval interval = new DateHistogramInterval(downsamplingInterval); + long downsamplingResolution = interval.estimateMillis(); + long aggregationResolution = dateHistogramInterval.getAsFixedInterval().estimateMillis(); + + // If the aggregation resolution is not a multiple of the downsampling resolution, the reported time for each + // bucket needs to be shifted by the mod - in addition to rounding that's applied as usual. + // Note that the aggregation resolution gets shifted to match the specified timezone. Timezone.getOffset() normally expects + // a date but it can also process an offset (interval) in milliseconds as it uses the Unix epoch for reference. + long aggregationOffset = SimpleTimeZone.getTimeZone(tz).getOffset(aggregationResolution) % downsamplingResolution; + if (aggregationOffset != 0) { + downsampledResultsOffset = true; + offset += aggregationOffset; + } + } } DateHistogramAggregationSupplier aggregatorSupplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, config); @@ -473,6 +498,7 @@ protected ValuesSourceAggregatorFactory innerBuild( order, keyed, minDocCount, + downsampledResultsOffset, rounding, roundedBounds, roundedHardBounds, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationSupplier.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationSupplier.java index 1529d0fab6cc9..b3f002e8b83a7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationSupplier.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationSupplier.java @@ -29,6 +29,7 @@ Aggregator build( BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, @Nullable LongBounds extendedBounds, @Nullable LongBounds hardBounds, ValuesSourceConfig valuesSourceConfig, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index ea08d5960d704..8f5323dfc9d2b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -79,6 +79,7 @@ public static Aggregator build( BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, @Nullable LongBounds extendedBounds, @Nullable LongBounds hardBounds, ValuesSourceConfig valuesSourceConfig, @@ -96,6 +97,7 @@ public static Aggregator build( order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, valuesSourceConfig, @@ -115,6 +117,7 @@ public static Aggregator build( order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, valuesSourceConfig, @@ -133,6 +136,7 @@ private static FromDateRange adaptIntoRangeOrNull( BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, @Nullable LongBounds extendedBounds, @Nullable LongBounds hardBounds, ValuesSourceConfig valuesSourceConfig, @@ -191,6 +195,7 @@ private static FromDateRange adaptIntoRangeOrNull( minDocCount, extendedBounds, keyed, + downsampledResultsOffset, fixedRoundingPoints ); } @@ -227,6 +232,7 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe private final 
boolean keyed; private final long minDocCount; + private final boolean downsampledResultsOffset; private final LongBounds extendedBounds; private final LongBounds hardBounds; @@ -240,6 +246,7 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, @Nullable LongBounds extendedBounds, @Nullable LongBounds hardBounds, ValuesSourceConfig valuesSourceConfig, @@ -255,6 +262,7 @@ private static RangeAggregator.Range[] ranges(LongBounds hardBounds, long[] fixe order.validate(this); this.keyed = keyed; this.minDocCount = minDocCount; + this.downsampledResultsOffset = downsampledResultsOffset; this.extendedBounds = extendedBounds; this.hardBounds = hardBounds; // TODO: Stop using null here @@ -328,6 +336,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I emptyBucketInfo, formatter, keyed, + downsampledResultsOffset, metadata() ); }); @@ -347,6 +356,7 @@ public InternalAggregation buildEmptyAggregation() { emptyBucketInfo, formatter, keyed, + downsampledResultsOffset, metadata() ); } @@ -392,6 +402,7 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr private final long minDocCount; private final LongBounds extendedBounds; private final boolean keyed; + private final boolean downsampledResultsOffset; private final long[] fixedRoundingPoints; FromDateRange( @@ -405,6 +416,7 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr long minDocCount, LongBounds extendedBounds, boolean keyed, + boolean downsampledResultsOffset, long[] fixedRoundingPoints ) throws IOException { super(parent, subAggregators, delegate); @@ -416,6 +428,7 @@ static class FromDateRange extends AdaptingAggregator implements SizedBucketAggr this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; this.keyed = keyed; + this.downsampledResultsOffset = downsampledResultsOffset; this.fixedRoundingPoints = fixedRoundingPoints; } @@ -454,6 +467,7 @@ protected InternalAggregation adapt(InternalAggregation delegateResult) { emptyBucketInfo, format, keyed, + downsampledResultsOffset, range.getMetadata() ); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 1a75766c40a6b..bb12f4588ef80 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -52,6 +52,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, valuesSourceConfig, @@ -71,6 +72,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, valuesSourceConfig, @@ -88,6 +90,7 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { private final BucketOrder order; private final boolean keyed; private final long minDocCount; + private final boolean downsampledResultsOffset; private final LongBounds extendedBounds; private final LongBounds hardBounds; private final Rounding rounding; @@ -98,6 +101,7 @@ public DateHistogramAggregatorFactory( 
BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, Rounding rounding, LongBounds extendedBounds, LongBounds hardBounds, @@ -111,6 +115,7 @@ public DateHistogramAggregatorFactory( this.aggregatorSupplier = aggregationSupplier; this.order = order; this.keyed = keyed; + this.downsampledResultsOffset = downsampledResultsOffset; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; this.hardBounds = hardBounds; @@ -139,6 +144,7 @@ protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound c order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, config, @@ -159,6 +165,7 @@ protected Aggregator createUnmapped(Aggregator parent, Map metad order, keyed, minDocCount, + downsampledResultsOffset, extendedBounds, hardBounds, config, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java index 34720f3f2f643..5fe44aa694cc5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregator.java @@ -59,6 +59,7 @@ class DateRangeHistogramAggregator extends BucketsAggregator { private final boolean keyed; private final long minDocCount; + private final boolean downsampledResultsOffset; private final LongBounds extendedBounds; private final LongBounds hardBounds; @@ -71,6 +72,7 @@ class DateRangeHistogramAggregator extends BucketsAggregator { BucketOrder order, boolean keyed, long minDocCount, + boolean downsampledResultsOffset, @Nullable LongBounds extendedBounds, @Nullable LongBounds hardBounds, ValuesSourceConfig valuesSourceConfig, @@ -87,6 +89,7 @@ class DateRangeHistogramAggregator extends BucketsAggregator { order.validate(this); this.keyed = keyed; this.minDocCount = minDocCount; + this.downsampledResultsOffset = downsampledResultsOffset; this.extendedBounds = extendedBounds; this.hardBounds = hardBounds; // TODO: Stop using null here @@ -197,6 +200,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I emptyBucketInfo, formatter, keyed, + downsampledResultsOffset, metadata() ); } @@ -217,6 +221,7 @@ public InternalAggregation buildEmptyAggregation() { emptyBucketInfo, formatter, keyed, + downsampledResultsOffset, metadata() ); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4ffc9abdc2202..449326b1d69bb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -202,6 +203,7 @@ public int hashCode() { private final BucketOrder order; private final DocValueFormat format; private final boolean keyed; + private final boolean downsampledResultsOffset; private final long minDocCount; private 
final long offset; final EmptyBucketInfo emptyBucketInfo; @@ -215,6 +217,7 @@ public int hashCode() { EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, + boolean downsampledResultsOffset, Map metadata ) { super(name, metadata); @@ -226,6 +229,7 @@ public int hashCode() { this.emptyBucketInfo = emptyBucketInfo; this.format = formatter; this.keyed = keyed; + this.downsampledResultsOffset = downsampledResultsOffset; } /** @@ -243,6 +247,11 @@ public InternalDateHistogram(StreamInput in) throws IOException { offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); + if (in.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + downsampledResultsOffset = in.readBoolean(); + } else { + downsampledResultsOffset = false; + } buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); } @@ -256,6 +265,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); + if (out.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + out.writeBoolean(downsampledResultsOffset); + } out.writeCollection(buckets); } @@ -283,7 +295,18 @@ BucketOrder getOrder() { @Override public InternalDateHistogram create(List buckets) { - return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, metadata); + return new InternalDateHistogram( + name, + buckets, + order, + minDocCount, + offset, + emptyBucketInfo, + format, + keyed, + downsampledResultsOffset, + metadata + ); } @Override @@ -508,6 +531,7 @@ public InternalAggregation reduce(List aggregations, Aggreg emptyBucketInfo, format, keyed, + downsampledResultsOffset, getMetadata() ); } @@ -523,6 +547,7 @@ public InternalAggregation finalizeSampling(SamplingContext samplingContext) { emptyBucketInfo, format, keyed, + downsampledResultsOffset, getMetadata() ); } @@ -542,6 +567,12 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th } else { builder.endArray(); } + if (downsampledResultsOffset) { + // Indicates that the dates reported in the buckets over downsampled indexes are offset + // to match the intervals at UTC, since downsampling always uses UTC-based intervals + // to calculate aggregated values. + builder.field("downsampled_results_offset", Boolean.TRUE); + } return builder; } @@ -570,7 +601,18 @@ public InternalAggregation createAggregation(List sourcePath(String fullName); + /** + * Returns the MappingLookup for the index, if one is initialized. + */ + @Nullable + public MappingLookup getMappingLookup() { + return null; + } + /** * Does this index have a {@code _doc_count} field in any segment? 
*/ @@ -611,6 +620,11 @@ public Set sourcePath(String fullName) { return context.sourcePath(fullName); } + @Override + public MappingLookup getMappingLookup() { + return context.getMappingLookup(); + } + @Override public void close() { /* diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java index 093ccc7181767..512784353a099 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java @@ -109,7 +109,7 @@ private InternalDateHistogram createTestInstance( } } BucketOrder order = BucketOrder.key(randomBoolean()); - return new InternalDateHistogram(name, buckets, order, minDocCount, 0L, emptyBucketInfo, format, keyed, metadata); + return new InternalDateHistogram(name, buckets, order, minDocCount, 0L, emptyBucketInfo, format, keyed, false, metadata); } @Override @@ -210,7 +210,7 @@ protected InternalDateHistogram mutateInstance(InternalDateHistogram instance) { } default -> throw new AssertionError("Illegal randomisation branch"); } - return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, metadata); + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, false, metadata); } public void testLargeReduce() { @@ -230,6 +230,7 @@ public void testLargeReduce() { ), DocValueFormat.RAW, false, + false, null ); expectReduceUsesTooManyBuckets(largeHisto, 100000); diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/30_date_histogram.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/30_date_histogram.yml index b7f3ec7b8f384..831ad158deda4 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/30_date_histogram.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/30_date_histogram.yml @@ -13,8 +13,8 @@ setup: mode: time_series routing_path: [ uid ] time_series: - start_time: 2021-04-28T00:00:00Z - end_time: 2021-04-29T00:00:00Z + start_time: 2020-01-01T00:00:00Z + end_time: 2022-01-01T00:00:00Z mappings: properties: "@timestamp": @@ -39,12 +39,6 @@ setup: - '{ "index": {} }' - '{ "@timestamp": "2021-04-28T18:55:00Z", "uid": "004", "total_memory_used": 120770 }' - - do: - indices.put_settings: - index: test - body: - index.blocks.write: true - --- "Date histogram aggregation on time series index and rollup indices": - skip: @@ -52,6 +46,12 @@ setup: reason: "rollup: unsupported aggregations errors added in 8.5.0" features: close_to + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + - do: indices.downsample: index: test @@ -142,24 +142,6 @@ setup: - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T18:00:00.000Z"} - match: { aggregations.date_histogram.buckets.0.key: 1619632800000 } - # date histogram aggregation with non-utc timezone on rollup index not supported - - do: - catch: bad_request - search: - index: test-downsample - body: - size: 0 - aggs: - date_histogram: - date_histogram: - field: "@timestamp" - fixed_interval: 1h - time_zone: "America/New_York" - - - match: { status: 400 } - - match: { 
error.root_cause.0.type: illegal_argument_exception } - - match: { error.root_cause.0.reason: "Field [@timestamp] of type [date] is not supported for aggregation [date_histogram] with timezone [America/New_York]" } - # date histogram aggregation with non-utc timezone on time series index supported - do: search: @@ -247,3 +229,485 @@ setup: - match: { _shards.failures.0.index: "test-downsample" } - match: { _shards.failures.0.reason.type: illegal_argument_exception } - match: { _shards.failures.0.reason.reason: "Field [@timestamp] of type [date] is not supported for aggregation [date_histogram] with interval type [calendar_interval]" } + +--- +timezone support - 15m: + - skip: + version: " - 8.12.99" + reason: "timezone support added in 8.13" + + - do: + bulk: + refresh: true + index: test + body: + # Check timezone support + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T10:05:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T10:55:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T11:05:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T11:55:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T12:05:00Z", "uid": "001", "total_memory_used": 120770 }' + # Check daylight savings + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T05:00:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T06:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T07:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T09:00:00Z", "uid": "001", "total_memory_used": 109009 }' + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.downsample: + index: test + target_index: test-downsample + body: > + { + "fixed_interval": "15m" + } + + - is_true: acknowledged + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "America/New_York" + query: + range: + "@timestamp": + gt: "2021-04-28T15:00:00Z" + lt: "2021-04-29T15:00:00Z" + + - match: { hits.total.value: 4 } + - length: { aggregations.date_histogram.buckets: 1 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 4 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T14:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.0.key: 1619632800000 } + - is_false: aggregations.date_histogram.downsampled_results_offset + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: 
+ field: "@timestamp" + fixed_interval: 1h + time_zone: "-01:15" + query: + range: + "@timestamp": + gt: "2021-04-27T15:00:00Z" + lt: "2021-04-28T15:00:00Z" + + - match: { hits.total.value: 5 } + - length: { aggregations.date_histogram.buckets: 3 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T08:00:00.000-01:15" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2021-04-28T09:00:00.000-01:15" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2021-04-28T10:00:00.000-01:15" } + - is_false: aggregations.date_histogram.downsampled_results_offset + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "+02:15" + query: + range: + "@timestamp": + gt: "2021-04-27T15:00:00Z" + lt: "2021-04-28T15:00:00Z" + + - match: { hits.total.value: 5 } + - length: { aggregations.date_histogram.buckets: 3 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T12:00:00.000+02:15" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2021-04-28T13:00:00.000+02:15" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2021-04-28T14:00:00.000+02:15" } + - is_false: aggregations.date_histogram.downsampled_results_offset + + # Check timezone with daylight savings + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "America/New_York" + query: + range: + "@timestamp": + gt: "2020-03-08T00:00:00Z" + lt: "2020-03-10T00:00:00Z" + + - match: { hits.total.value: 10 } + - length: { aggregations.date_histogram.buckets: 7 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2020-03-08T23:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2020-03-09T00:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2020-03-09T01:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.3.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.3.key_as_string: "2020-03-09T02:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.4.key_as_string: "2020-03-09T03:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.5.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.5.key_as_string: "2020-03-09T04:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.6.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.6.key_as_string: "2020-03-09T05:00:00.000-04:00" } + - is_false: aggregations.date_histogram.downsampled_results_offset + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1d + time_zone: 
"America/New_York" + query: + range: + "@timestamp": + gt: "2020-03-08T00:00:00Z" + lt: "2020-03-10T00:00:00Z" + + - match: { hits.total.value: 10 } + - length: { aggregations.date_histogram.buckets: 2 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2020-03-08T00:00:00.000-05:00" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 8 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2020-03-09T00:00:00.000-04:00" } + - is_false: aggregations.date_histogram.downsampled_results_offset + +--- +timezone support - 1h: + - skip: + version: " - 8.12.99" + reason: "timezone support added in 8.13" + + - do: + bulk: + refresh: true + index: test + body: + # Check timezone support + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T10:05:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T10:55:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T11:05:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T11:55:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2021-04-28T12:05:00Z", "uid": "001", "total_memory_used": 120770 }' + # Check daylight savings + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T05:00:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T06:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T07:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T09:00:00Z", "uid": "001", "total_memory_used": 109009 }' + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.downsample: + index: test + target_index: test-downsample + body: > + { + "fixed_interval": "1h" + } + + - is_true: acknowledged + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "America/New_York" + query: + range: + "@timestamp": + gt: "2021-04-28T15:00:00Z" + lt: "2021-04-29T15:00:00Z" + + - match: { hits.total.value: 4 } + - length: { aggregations.date_histogram.buckets: 1 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 4 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T14:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.0.key: 1619632800000 } + - is_false: aggregations.date_histogram.downsampled_results_offset + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 
1h + time_zone: "-01:15" + query: + range: + "@timestamp": + gt: "2021-04-27T15:00:00Z" + lt: "2021-04-28T15:00:00Z" + + - match: { hits.total.value: 3 } + - match: { aggregations.date_histogram.downsampled_results_offset: true } + - length: { aggregations.date_histogram.buckets: 3 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T08:45:00.000-01:15" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2021-04-28T09:45:00.000-01:15" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2021-04-28T10:45:00.000-01:15" } + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "+02:15" + query: + range: + "@timestamp": + gt: "2021-04-27T15:00:00Z" + lt: "2021-04-28T15:00:00Z" + + - match: { hits.total.value: 3 } + - match: { aggregations.date_histogram.downsampled_results_offset: true } + - length: { aggregations.date_histogram.buckets: 3 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2021-04-28T12:15:00.000+02:15" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2021-04-28T13:15:00.000+02:15" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2021-04-28T14:15:00.000+02:15" } + + # Check timezone with daylight savings + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1h + time_zone: "America/New_York" + query: + range: + "@timestamp": + gt: "2020-03-08T00:00:00Z" + lt: "2020-03-10T00:00:00Z" + + - match: { hits.total.value: 7 } + - length: { aggregations.date_histogram.buckets: 7 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2020-03-08T23:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2020-03-09T00:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2020-03-09T01:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.3.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.3.key_as_string: "2020-03-09T02:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.4.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.4.key_as_string: "2020-03-09T03:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.5.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.5.key_as_string: "2020-03-09T04:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.6.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.6.key_as_string: "2020-03-09T05:00:00.000-04:00" } + - is_false: aggregations.date_histogram.downsampled_results_offset + + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1d + time_zone: "America/New_York" + query: + range: + 
"@timestamp": + gt: "2020-03-08T00:00:00Z" + lt: "2020-03-10T00:00:00Z" + + - match: { hits.total.value: 7 } + - length: { aggregations.date_histogram.buckets: 2 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2020-03-08T00:00:00.000-05:00" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 8 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2020-03-09T00:00:00.000-04:00" } + - is_false: aggregations.date_histogram.downsampled_results_offset + +--- +timezone support - 1d: + - skip: + version: " - 8.12.99" + reason: "timezone support added in 8.13" + + - do: + bulk: + refresh: true + index: test + body: + # Check daylight savings + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-08T03:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-08T03:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T03:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T04:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T05:00:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T06:00:00Z", "uid": "001", "total_memory_used": 106780 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T07:50:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:00:00Z", "uid": "001", "total_memory_used": 110450 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-09T08:50:00Z", "uid": "001", "total_memory_used": 109009 }' + - '{ "index": {} }' + - '{ "@timestamp": "2020-03-10T09:00:00Z", "uid": "001", "total_memory_used": 109009 }' + + - do: + indices.put_settings: + index: test + body: + index.blocks.write: true + + - do: + indices.downsample: + index: test + target_index: test-downsample + body: > + { + "fixed_interval": "1d" + } + + - is_true: acknowledged + + # Check timezone with daylight savings + - do: + search: + index: test-downsample + body: + size: 0 + aggs: + date_histogram: + date_histogram: + field: "@timestamp" + fixed_interval: 1d + time_zone: "America/New_York" + query: + range: + "@timestamp": + gt: "2020-03-01T00:00:00Z" + lt: "2020-03-30T00:00:00Z" + + - match: { hits.total.value: 3 } + - match: { aggregations.date_histogram.downsampled_results_offset: true } + - length: { aggregations.date_histogram.buckets: 3 } + - match: { aggregations.date_histogram.buckets.0.doc_count: 2 } + - match: { aggregations.date_histogram.buckets.0.key_as_string: "2020-03-07T19:00:00.000-05:00" } + - match: { aggregations.date_histogram.buckets.1.doc_count: 9 } + - match: { aggregations.date_histogram.buckets.1.key_as_string: "2020-03-08T19:00:00.000-04:00" } + - match: { aggregations.date_histogram.buckets.2.doc_count: 1 } + - match: { aggregations.date_histogram.buckets.2.key_as_string: "2020-03-09T19:00:00.000-04:00" } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java 
b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java index f248da8a7842a..e0d1fa45a80c3 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java @@ -139,6 +139,7 @@ public boolean validateClusterForming() { Settings indexSettings = getSettingsResponse.getIndexToSettings().get(targetIndex); assertThat(indexSettings, is(notNullValue())); assertThat(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.get(indexSettings), is(IndexMetadata.DownsampleTaskStatus.SUCCESS)); + assertEquals("5m", IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.get(indexSettings)); } catch (Exception e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index e7bd2f0c0fb27..5cceffd0f4818 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -115,6 +115,7 @@ public class TransportDownsampleAction extends AcknowledgedTransportMasterNodeAc private final IndexScopedSettings indexScopedSettings; private final ThreadContext threadContext; private final PersistentTasksService persistentTasksService; + private String downsamplingInterval; private static final Set FORBIDDEN_SETTINGS = Set.of( IndexSettings.DEFAULT_PIPELINE.getKey(), @@ -284,6 +285,7 @@ protected void masterOperation( // Validate downsampling interval validateDownsamplingInterval(mapperService, request.getDownsampleConfig()); + downsamplingInterval = request.getDownsampleConfig().getInterval().toString(); final List dimensionFields = new ArrayList<>(); final List metricFields = new ArrayList<>(); @@ -888,6 +890,7 @@ public ClusterState execute(ClusterState currentState) { Settings.builder() .put(downsampleIndex.getSettings()) .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), DownsampleTaskStatus.SUCCESS) + .put(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey(), downsamplingInterval) .build(), downsampleIndexName ); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index d446873bd1f75..a6fa7cd3ffbc6 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -213,6 +213,7 @@ public void testRollupIndex() throws Exception { assertEquals(index, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals(fixedInterval.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey())); }); assertBusy( () -> assertTrue("Alias [" + alias + "] does not point to index [" + rollupIndex + "]", 
aliasExists(rollupIndex, alias)) @@ -299,6 +300,7 @@ public void testRollupIndexInTheHotPhaseAfterRollover() throws Exception { assertEquals(originalIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals(fixedInterval.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey())); }); } @@ -345,6 +347,7 @@ public void testTsdbDataStreams() throws Exception { assertEquals(backingIndexName, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals(fixedInterval.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey())); }); } @@ -478,6 +481,7 @@ public void testDownsampleTwice() throws Exception { assertEquals(downsampleIndexName, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); + assertEquals("1h", settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey())); }, 60, TimeUnit.SECONDS); } catch (AssertionError ae) { if (indexExists(firstBackingIndex)) { @@ -559,6 +563,7 @@ public void testDownsampleTwiceSameInterval() throws Exception { assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey())); assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); + assertEquals("5m", settings.get(IndexMetadata.INDEX_DOWNSAMPLE_INTERVAL.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); }, 60, TimeUnit.SECONDS); From 83565aceffb714806ee9b0f2943fdc6d237b9fe9 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Tue, 16 Jan 2024 09:53:57 +0100 Subject: [PATCH 37/95] ESQL: Fix rounding error in BlockBenchmark (#104041) Cast each double to long before summing, so that floating-point rounding errors cannot make the checksum depend on the traversal order.
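For context: floating-point addition is not associative, so accumulating the same doubles along two different traversal orders can round differently and yield two different checksums for identical data, while accumulating the truncated long values cannot. The standalone sketch below is illustrative only and is not part of this patch (the class name is made up for the demo); it mirrors the idea behind the patched computeDoubleCheckSum, which now adds (long) block.getDouble(i) into a long accumulator.

// Demonstrates why a double accumulator makes the checksum depend on traversal
// order, while a long accumulator does not.
public class ChecksumOrderDemo {
    public static void main(String[] args) {
        // Chosen so that 1e16 + 1.0 loses the 1.0 to rounding in double arithmetic.
        double[] values = { 1e16, 1.0, -1e16, 0.5 };

        double forward = 0;
        double backward = 0;
        for (int i = 0; i < values.length; i++) {
            forward += values[i];
        }
        for (int i = values.length - 1; i >= 0; i--) {
            backward += values[i];
        }
        // Same values, different order, different double sums.
        System.out.println("double: " + forward + " vs " + backward);

        long forwardLong = 0;
        long backwardLong = 0;
        for (int i = 0; i < values.length; i++) {
            forwardLong += (long) values[i];
        }
        for (int i = values.length - 1; i >= 0; i--) {
            backwardLong += (long) values[i];
        }
        // Long addition is associative and commutative (it wraps on overflow),
        // so both traversal orders yield the same checksum.
        System.out.println("long:   " + forwardLong + " vs " + backwardLong);
    }
}

On an IEEE-754 JVM this prints two different double sums (for example 0.5 vs 0.0) but identical long sums, which is exactly the order-independence the benchmark checksum needs.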
--- .../benchmark/compute/operator/BlockBenchmark.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java index e0281dbb856d4..49603043e7bcc 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java @@ -720,7 +720,7 @@ private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversal } private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) { - double sum = 0; + long sum = 0; for (int position : traversalOrder) { if (block.isNull(position)) { @@ -729,11 +729,12 @@ private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrde int start = block.getFirstValueIndex(position); int end = start + block.getValueCount(position); for (int i = start; i < end; i++) { - sum += block.getDouble(i); + // Use an operation that is not affected by rounding errors. Otherwise, the result may depend on the traversalOrder. + sum += (long) block.getDouble(i); } } - return (long) sum; + return sum; } private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) { From b52d3c0691fcff9281e7f236544a0b2180a7c9c9 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Tue, 16 Jan 2024 10:00:12 +0100 Subject: [PATCH 38/95] ESQL: Remove obsolete comments on filtering (#104359) --- .../org/elasticsearch/compute/data/BooleanArrayBlock.java | 1 - .../org/elasticsearch/compute/data/BooleanBigArrayBlock.java | 1 - .../org/elasticsearch/compute/data/BytesRefArrayBlock.java | 1 - .../org/elasticsearch/compute/data/DoubleArrayBlock.java | 1 - .../org/elasticsearch/compute/data/DoubleBigArrayBlock.java | 1 - .../org/elasticsearch/compute/data/IntArrayBlock.java | 1 - .../org/elasticsearch/compute/data/IntBigArrayBlock.java | 1 - .../org/elasticsearch/compute/data/LongArrayBlock.java | 1 - .../org/elasticsearch/compute/data/LongBigArrayBlock.java | 1 - .../src/main/java/org/elasticsearch/compute/data/Block.java | 3 +-- .../java/org/elasticsearch/compute/data/X-ArrayBlock.java.st | 1 - .../org/elasticsearch/compute/data/X-BigArrayBlock.java.st | 1 - 12 files changed, 1 insertion(+), 13 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index a592bd65acb3a..666f1ad926eeb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -67,7 +67,6 @@ public boolean getBoolean(int valueIndex) { @Override public BooleanBlock filter(int... 
positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java index 82a0bb364966b..a19ed24302b65 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -68,7 +68,6 @@ public boolean getBoolean(int valueIndex) { @Override public BooleanBlock filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 7f1a1608dac5b..69e5499eaba46 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -70,7 +70,6 @@ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { @Override public BytesRefBlock filter(int... positions) { - // TODO use reference counting to share the vector final BytesRef scratch = new BytesRef(); try (var builder = blockFactory().newBytesRefBlockBuilder(positions.length)) { for (int pos : positions) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index cb5258c7ae22c..b5f5c69e0508a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -67,7 +67,6 @@ public double getDouble(int valueIndex) { @Override public DoubleBlock filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newDoubleBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java index 59bbd5a941e4b..39f959edf5ee3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -68,7 +68,6 @@ public double getDouble(int valueIndex) { @Override public DoubleBlock filter(int... 
positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newDoubleBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 0d8262975c535..2afefbff16117 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -67,7 +67,6 @@ public int getInt(int valueIndex) { @Override public IntBlock filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newIntBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java index b1a1473ff4b4a..dc60ce43c04cc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -68,7 +68,6 @@ public int getInt(int valueIndex) { @Override public IntBlock filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newIntBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index c12033e829e6f..7491d6519fc57 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -67,7 +67,6 @@ public long getLong(int valueIndex) { @Override public LongBlock filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newLongBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java index 9eb8a527a96b5..3ff9a12991d43 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -68,7 +68,6 @@ public long getLong(int valueIndex) { @Override public LongBlock filter(int... 
positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().newLongBlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index c89a0ce260c67..5a6d7cb4a6003 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -107,8 +107,7 @@ public interface Block extends Accountable, BlockLoader.Block, NamedWriteable, R boolean mayHaveMultivaluedFields(); /** - * Creates a new block that only exposes the positions provided. Materialization of the selected positions is avoided. - * The new block may hold a reference to this block, increasing this block's reference count. + * Creates a new block that only exposes the positions provided. * @param positions the positions to retain * @return a filtered block * TODO: pass BlockFactory diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index e24d355bf2c24..20395ff27b1b4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -83,7 +83,6 @@ $endif$ @Override public $Type$Block filter(int... positions) { - // TODO use reference counting to share the vector $if(BytesRef)$ final BytesRef scratch = new BytesRef(); $endif$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st index 71d6005a9fc17..d65c54b5e2b24 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -68,7 +68,6 @@ public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Ty @Override public $Type$Block filter(int... positions) { - // TODO use reference counting to share the vector try (var builder = blockFactory().new$Type$BlockBuilder(positions.length)) { for (int pos : positions) { if (isNull(pos)) { From 063313ffcf9cbfd170068183eaaf549a2ab73b3a Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 16 Jan 2024 10:12:45 +0100 Subject: [PATCH 39/95] Set read timeout for fetching IMDSv2 token (#104253) Use the timeout set by AWS_METADATA_SERVICE_TIMEOUT environment variable both as connect and read timeout analogous to the AWS SDK. 
See https://docs.aws.amazon.com/sdkref/latest/guide/feature-ec2-instance-metadata.html Resolves #104244 --- docs/changelog/104253.yaml | 6 ++++++ plugins/discovery-ec2/build.gradle | 3 +++ .../elasticsearch/discovery/ec2/AwsEc2Utils.java | 14 ++++++++++++-- .../discovery/ec2/Ec2DiscoveryPluginTests.java | 13 +++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/104253.yaml diff --git a/docs/changelog/104253.yaml b/docs/changelog/104253.yaml new file mode 100644 index 0000000000000..bacde751e2507 --- /dev/null +++ b/docs/changelog/104253.yaml @@ -0,0 +1,6 @@ +pr: 104253 +summary: Set read timeout for fetching IMDSv2 token +area: Discovery-Plugins +type: enhancement +issues: + - 104244 diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index b57d6bce26633..2d4313db1b8ff 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -102,6 +102,9 @@ tasks.named("test").configure { } else { nonInputProperties.systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" } + if (BuildParams.random.nextBoolean()) { + env 'AWS_METADATA_SERVICE_TIMEOUT', '1' + } } tasks.named("check").configure { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java index 256a5516a2ef2..b2475216a9ce7 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java @@ -8,6 +8,9 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.SDKGlobalConfiguration; +import com.amazonaws.util.StringUtils; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; @@ -24,7 +27,11 @@ class AwsEc2Utils { private static final Logger logger = LogManager.getLogger(AwsEc2Utils.class); - private static final int CONNECT_TIMEOUT = 2000; + // The timeout can be configured via the AWS_METADATA_SERVICE_TIMEOUT environment variable + private static final int TIMEOUT = Optional.ofNullable(System.getenv(SDKGlobalConfiguration.AWS_METADATA_SERVICE_TIMEOUT_ENV_VAR)) + .filter(StringUtils::hasValue) + .map(s -> Integer.parseInt(s) * 1000) + .orElse(2000); private static final int METADATA_TOKEN_TTL_SECONDS = 10; static final String X_AWS_EC_2_METADATA_TOKEN = "X-aws-ec2-metadata-token"; @@ -39,7 +46,10 @@ static Optional getMetadataToken(String metadataTokenUrl) { try { urlConnection = (HttpURLConnection) new URL(metadataTokenUrl).openConnection(); urlConnection.setRequestMethod("PUT"); - urlConnection.setConnectTimeout(CONNECT_TIMEOUT); + // Use both timeout for connect and read timeout analogous to AWS SDK. 
+ // See com.amazonaws.internal.HttpURLConnection#connectToEndpoint + urlConnection.setConnectTimeout(TIMEOUT); + urlConnection.setReadTimeout(TIMEOUT); urlConnection.setRequestProperty("X-aws-ec2-metadata-token-ttl-seconds", String.valueOf(METADATA_TOKEN_TTL_SECONDS)); } catch (IOException e) { logger.warn("Unable to access the IMDSv2 URI: " + metadataTokenUrl, e); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index b9bea564e2720..41b848954b551 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -121,6 +121,19 @@ public void testTokenMetadataApiIsMisbehaving() throws Exception { } } + public void testTokenMetadataApiDoesNotRespond() throws Exception { + try (var metadataServer = new MetadataServer("/metadata", exchange -> { + assertNull(exchange.getRequestHeaders().getFirst("X-aws-ec2-metadata-token")); + exchange.sendResponseHeaders(200, 0); + exchange.getResponseBody().write("us-east-1c".getBytes(StandardCharsets.UTF_8)); + exchange.close(); + }, "/latest/api/token", ex -> { + // Intentionally don't close the connection, so the client has to time out + })) { + assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); + } + } + public void testTokenMetadataApiIsNotAvailable() throws Exception { try (var metadataServer = metadataServerWithoutToken()) { assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); From a014e6b0ab300e19fd5dba60532b46d9a2c33d52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Tue, 16 Jan 2024 12:04:50 +0100 Subject: [PATCH 40/95] [Transform] Fix and unmute transform chaining test. 
(#104302) --- .../integration/TransformChainIT.java | 140 ++++++++++++------ .../integration/TransformRestTestCase.java | 14 ++ .../checkpoint/TransformGetCheckpointIT.java | 24 +++ 3 files changed, 136 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformChainIT.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformChainIT.java index b73adea63a223..1fb1b3ac0bc5c 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformChainIT.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformChainIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.transform.integration; -import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.common.Strings; @@ -17,47 +16,34 @@ import org.junit.After; import org.junit.Before; +import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104238") public class TransformChainIT extends TransformRestTestCase { - private static final String DEST_INDEX_TEMPLATE = """ - { - "index_patterns": [ "my-transform-*-dest" ], - "mappings": { - "properties": { - "timestamp": { - "type": "date" - }, - "user_id": { - "type": "keyword" - }, - "stars": { - "type": "integer" - } - } - } - }"""; - + private static final String SET_INGEST_TIME_PIPELINE = "set_ingest_time"; private static final String TRANSFORM_CONFIG_TEMPLATE = """ { "source": { "index": "%s" }, "dest": { - "index": "%s" + "index": "%s", + "pipeline": "%s" }, "sync": { "time": { - "field": "timestamp" + "field": "event.ingested", + "delay": "10s" } }, "frequency": "%s", @@ -85,15 +71,67 @@ public class TransformChainIT extends TransformRestTestCase { }, "settings": { "unattended": true, - "deduce_mappings": %s + "deduce_mappings": %s, + "use_point_in_time": %s } }"""; private TestThreadPool threadPool; @Before - public void createThreadPool() { + public void setupTransformTests() throws IOException { threadPool = new TestThreadPool(getTestName()); + + // Create destination index template. It will be used by all the transforms in this test. + Request createIndexTemplateRequest = new Request("PUT", "_template/test_dest_index_template"); + createIndexTemplateRequest.setJsonEntity(""" + { + "index_patterns": [ "my-transform-*-dest" ], + "mappings": { + "properties": { + "timestamp": { + "type": "date" + }, + "user_id": { + "type": "keyword" + }, + "stars": { + "type": "integer" + } + } + } + }"""); + createIndexTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); + assertAcknowledged(client().performRequest(createIndexTemplateRequest)); + + // Create ingest pipeline which sets event.ingested field. This is needed for transform's synchronisation to work correctly. 
+ Request putIngestPipelineRequest = new Request("PUT", "_ingest/pipeline/" + SET_INGEST_TIME_PIPELINE); + putIngestPipelineRequest.setJsonEntity(""" + { + "description": "Set ingest timestamp.", + "processors": [ + { + "set": { + "field": "event.ingested", + "value": "{{{_ingest.timestamp}}}" + } + } + ] + }"""); + assertOK(client().performRequest(putIngestPipelineRequest)); + + // Set logging levels for debugging. + Request settingsRequest = new Request("PUT", "/_cluster/settings"); + settingsRequest.setJsonEntity(""" + { + "persistent": { + "logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer": "debug", + "logger.org.elasticsearch.xpack.transform": "debug", + "logger.org.elasticsearch.xpack.transform.notifications": "debug", + "logger.org.elasticsearch.xpack.transform.transforms": "debug" + } + }"""); + assertOK(client().performRequest(settingsRequest)); } @After @@ -103,26 +141,36 @@ public void shutdownThreadPool() { } } - public void testChainedTransforms() throws Exception { - String reviewsIndexName = "reviews"; - final int numDocs = 100; - createReviewsIndex(reviewsIndexName, numDocs, 100, TransformIT::getUserIdForRow, TransformIT::getDateStringForRow); + public void testTwoChainedTransforms() throws Exception { + testChainedTransforms(2); + } - // Create destination index template. It will be used by all the transforms in this test. - Request createIndexTemplateRequest = new Request("PUT", "_template/test_dest_index_template"); - createIndexTemplateRequest.setJsonEntity(DEST_INDEX_TEMPLATE); - createIndexTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); - assertAcknowledged(client().performRequest(createIndexTemplateRequest)); + public void testThreeChainedTransforms() throws Exception { + testChainedTransforms(3); + } - final int numberOfTransforms = 3; - List transformIds = new ArrayList<>(numberOfTransforms); + private void testChainedTransforms(final int numTransforms) throws Exception { + final String reviewsIndexName = "reviews"; + final int numDocs = 100; + final Instant now = Instant.now(); + createReviewsIndex( + reviewsIndexName, + numDocs, + 100, + TransformIT::getUserIdForRow, + row -> Instant.ofEpochMilli(now.toEpochMilli() - 1000 * numDocs + 1000 * row).toString(), + SET_INGEST_TIME_PIPELINE + ); + + List transformIds = new ArrayList<>(numTransforms); // Create the chain of transforms. Previous transform's destination index becomes next transform's source index. - for (int i = 0; i < numberOfTransforms; ++i) { - String transformId = "my-transform-" + i; + String transformIdPrefix = "my-transform-" + randomAlphaOfLength(4).toLowerCase(Locale.ROOT) + "-" + numTransforms + "-"; + for (int i = 0; i < numTransforms; ++i) { + String transformId = transformIdPrefix + i; transformIds.add(transformId); // Set up the transform so that its source index is the destination index of the previous transform in the chain. // The number of documents is expected to be the same in all the indices. - String sourceIndex = i == 0 ? reviewsIndexName : "my-transform-" + (i - 1) + "-dest"; + String sourceIndex = i == 0 ? reviewsIndexName : transformIds.get(i - 1) + "-dest"; String destIndex = transformId + "-dest"; assertFalse(indexExists(destIndex)); @@ -137,12 +185,11 @@ public void testChainedTransforms() throws Exception { startTransform(transformId, RequestOptions.DEFAULT); } - // Wait for the transforms to finish processing. Since the transforms are continuous, we cannot wait for them to be STOPPED. 
- // Instead, we wait for the expected number of processed documents. + // Give the transforms some time to finish processing. Since the transforms are continuous, we cannot wait for them to be STOPPED. assertBusy(() -> { + // Verify that each transform processed an expected number of documents. for (String transformId : transformIds) { Map stats = getTransformStats(transformId); - // Verify that all the documents got processed. assertThat( "Stats were: " + stats, XContentMapValues.extractValue(stats, "stats", "documents_processed"), @@ -162,6 +209,15 @@ public void testChainedTransforms() throws Exception { } private static String createTransformConfig(String sourceIndex, String destIndex) { - return Strings.format(TRANSFORM_CONFIG_TEMPLATE, sourceIndex, destIndex, "1s", "1s", randomBoolean()); + return Strings.format( + TRANSFORM_CONFIG_TEMPLATE, + sourceIndex, + destIndex, + SET_INGEST_TIME_PIPELINE, + "1s", + "1s", + randomBoolean(), + randomBoolean() + ); } } diff --git a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index 184df6e098343..9c4241fa88ef5 100644 --- a/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -421,6 +421,17 @@ protected void createReviewsIndex( int numUsers, Function userIdProvider, Function dateStringProvider + ) throws Exception { + createReviewsIndex(indexName, numDocs, numUsers, userIdProvider, dateStringProvider, null); + } + + protected void createReviewsIndex( + String indexName, + int numDocs, + int numUsers, + Function userIdProvider, + Function dateStringProvider, + String defaultPipeline ) throws Exception { assert numUsers > 0; @@ -461,6 +472,9 @@ protected void createReviewsIndex( .endObject() .endObject() .endObject(); + if (defaultPipeline != null) { + builder.startObject("settings").field("index.default_pipeline", defaultPipeline).endObject(); + } } builder.endObject(); diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java index b952869a34d88..acb77ce1db4b4 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointIT.java @@ -137,6 +137,30 @@ public void testGetCheckpointWithQueryThatFiltersOutEverything() throws Exceptio assertThat("Response was: " + response.getCheckpoints(), response.getCheckpoints(), is(anEmptyMap())); } + public void testGetCheckpointWithMissingIndex() throws Exception { + GetCheckpointAction.Request request = new GetCheckpointAction.Request( + new String[] { "test_index_missing" }, + IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, + TimeValue.timeValueSeconds(5) + ); + + GetCheckpointAction.Response response = client().execute(GetCheckpointAction.INSTANCE, request).get(); + assertThat("Response was: " + response.getCheckpoints(), 
response.getCheckpoints(), is(anEmptyMap())); + + request = new GetCheckpointAction.Request( + new String[] { "test_index_missing-*" }, + IndicesOptions.LENIENT_EXPAND_OPEN, + null, + null, + TimeValue.timeValueSeconds(5) + ); + + response = client().execute(GetCheckpointAction.INSTANCE, request).get(); + assertThat("Response was: " + response.getCheckpoints(), response.getCheckpoints(), is(anEmptyMap())); + } + public void testGetCheckpointTimeoutExceeded() throws Exception { final String indexNamePrefix = "test_index-"; final int indices = 100; From ebe71f9fe04abadbae3ce7831ef86bb83408319b Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Tue, 16 Jan 2024 12:58:50 +0100 Subject: [PATCH 41/95] Revert "Set read timeout for fetching IMDSv2 token" (#104397) --- docs/changelog/104253.yaml | 6 ------ plugins/discovery-ec2/build.gradle | 3 --- .../elasticsearch/discovery/ec2/AwsEc2Utils.java | 14 ++------------ .../discovery/ec2/Ec2DiscoveryPluginTests.java | 13 ------------- 4 files changed, 2 insertions(+), 34 deletions(-) delete mode 100644 docs/changelog/104253.yaml diff --git a/docs/changelog/104253.yaml b/docs/changelog/104253.yaml deleted file mode 100644 index bacde751e2507..0000000000000 --- a/docs/changelog/104253.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104253 -summary: Set read timeout for fetching IMDSv2 token -area: Discovery-Plugins -type: enhancement -issues: - - 104244 diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 2d4313db1b8ff..b57d6bce26633 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -102,9 +102,6 @@ tasks.named("test").configure { } else { nonInputProperties.systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" } - if (BuildParams.random.nextBoolean()) { - env 'AWS_METADATA_SERVICE_TIMEOUT', '1' - } } tasks.named("check").configure { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java index b2475216a9ce7..256a5516a2ef2 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java @@ -8,9 +8,6 @@ package org.elasticsearch.discovery.ec2; -import com.amazonaws.SDKGlobalConfiguration; -import com.amazonaws.util.StringUtils; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; @@ -27,11 +24,7 @@ class AwsEc2Utils { private static final Logger logger = LogManager.getLogger(AwsEc2Utils.class); - // The timeout can be configured via the AWS_METADATA_SERVICE_TIMEOUT environment variable - private static final int TIMEOUT = Optional.ofNullable(System.getenv(SDKGlobalConfiguration.AWS_METADATA_SERVICE_TIMEOUT_ENV_VAR)) - .filter(StringUtils::hasValue) - .map(s -> Integer.parseInt(s) * 1000) - .orElse(2000); + private static final int CONNECT_TIMEOUT = 2000; private static final int METADATA_TOKEN_TTL_SECONDS = 10; static final String X_AWS_EC_2_METADATA_TOKEN = "X-aws-ec2-metadata-token"; @@ -46,10 +39,7 @@ static Optional getMetadataToken(String metadataTokenUrl) { try { urlConnection = (HttpURLConnection) new URL(metadataTokenUrl).openConnection(); urlConnection.setRequestMethod("PUT"); - // Use both timeout for connect and read timeout analogous to AWS SDK. 
- // See com.amazonaws.internal.HttpURLConnection#connectToEndpoint - urlConnection.setConnectTimeout(TIMEOUT); - urlConnection.setReadTimeout(TIMEOUT); + urlConnection.setConnectTimeout(CONNECT_TIMEOUT); urlConnection.setRequestProperty("X-aws-ec2-metadata-token-ttl-seconds", String.valueOf(METADATA_TOKEN_TTL_SECONDS)); } catch (IOException e) { logger.warn("Unable to access the IMDSv2 URI: " + metadataTokenUrl, e); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index 41b848954b551..b9bea564e2720 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -121,19 +121,6 @@ public void testTokenMetadataApiIsMisbehaving() throws Exception { } } - public void testTokenMetadataApiDoesNotRespond() throws Exception { - try (var metadataServer = new MetadataServer("/metadata", exchange -> { - assertNull(exchange.getRequestHeaders().getFirst("X-aws-ec2-metadata-token")); - exchange.sendResponseHeaders(200, 0); - exchange.getResponseBody().write("us-east-1c".getBytes(StandardCharsets.UTF_8)); - exchange.close(); - }, "/latest/api/token", ex -> { - // Intentionally don't close the connection, so the client has to time out - })) { - assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); - } - } - public void testTokenMetadataApiIsNotAvailable() throws Exception { try (var metadataServer = metadataServerWithoutToken()) { assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); From a3db0c7d5752c2d850a708808be6b10e45e140e4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Jan 2024 13:14:12 +0100 Subject: [PATCH 42/95] Move more XContent parser code that is test-only into test module (#104338) Just another round of #104261 --- .../mustache/MultiSearchTemplateResponse.java | 24 ----- .../MultiSearchTemplateResponseTests.java | 23 ++++- .../index/rankeval/RankEvalResponse.java | 41 --------- .../index/rankeval/RankEvalResponseTests.java | 34 ++++++- .../GetScriptLanguageResponse.java | 5 -- .../GetStoredScriptResponse.java | 32 ------- .../action/get/MultiGetResponse.java | 90 +------------------ .../ingest/SimulatePipelineResponse.java | 62 ------------- .../index/reindex/BulkByScrollResponse.java | 85 +----------------- .../org/elasticsearch/rest/RestResponse.java | 41 +-------- .../upgrades/FeatureMigrationResults.java | 29 +----- .../SingleFeatureMigrationResult.java | 31 +------ .../GetScriptLanguageResponseTests.java | 2 +- .../GetStoredScriptResponseTests.java | 34 ++++++- .../action/get/MultiGetResponseTests.java | 88 +++++++++++++++++- .../ingest/SimulatePipelineResponseTests.java | 57 +++++++++++- .../reindex/BulkByScrollResponseTests.java | 73 ++++++++++++++- .../elasticsearch/rest/RestResponseTests.java | 41 ++++++++- .../FeatureMigrationResultsTests.java | 49 +++++++++- 19 files changed, 403 insertions(+), 438 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 9bdabcede8ec6..b867fcfb905ea 100644 --- 
a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -12,7 +12,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -26,7 +25,6 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Iterator; @@ -204,28 +202,6 @@ static final class Fields { static final String STATUS = "status"; } - public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { - // The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response - MultiSearchResponse mSearchResponse = MultiSearchResponse.fromXContext(parser); - try { - org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); - Item[] templateResponses = new Item[responses.length]; - int i = 0; - for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { - SearchTemplateResponse stResponse = null; - if (item.getResponse() != null) { - stResponse = new SearchTemplateResponse(); - stResponse.setResponse(item.getResponse()); - item.getResponse().incRef(); - } - templateResponses[i++] = new Item(stResponse, item.getFailure()); - } - return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); - } finally { - mSearchResponse.decRef(); - } - } - @Override public String toString() { return Strings.toString(this); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java index 03f2fbd3e81a7..86f23397cfadb 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.script.mustache; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; @@ -95,8 +96,26 @@ private static MultiSearchTemplateResponse createTestInstanceWithFailures() { } @Override - protected MultiSearchTemplateResponse doParseInstance(XContentParser parser) throws IOException { - return MultiSearchTemplateResponse.fromXContext(parser); + protected MultiSearchTemplateResponse doParseInstance(XContentParser parser) { + // The MultiSearchTemplateResponse is identical to the multi search response so we reuse the parsing logic in multi search response + MultiSearchResponse mSearchResponse = MultiSearchResponse.fromXContext(parser); + try { + org.elasticsearch.action.search.MultiSearchResponse.Item[] responses = mSearchResponse.getResponses(); + MultiSearchTemplateResponse.Item[] 
templateResponses = new MultiSearchTemplateResponse.Item[responses.length]; + int i = 0; + for (org.elasticsearch.action.search.MultiSearchResponse.Item item : responses) { + SearchTemplateResponse stResponse = null; + if (item.getResponse() != null) { + stResponse = new SearchTemplateResponse(); + stResponse.setResponse(item.getResponse()); + item.getResponse().incRef(); + } + templateResponses[i++] = new MultiSearchTemplateResponse.Item(stResponse, item.getFailure()); + } + return new MultiSearchTemplateResponse(templateResponses, mSearchResponse.getTook().millis()); + } finally { + mSearchResponse.decRef(); + } } @Override diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index cc7397637e04a..061d8292b3e5f 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -14,21 +14,13 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; /** * Returns the results for a {@link RankEvalRequest}.
@@ -111,37 +103,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } - - private static final ParseField DETAILS_FIELD = new ParseField("details"); - private static final ParseField FAILURES_FIELD = new ParseField("failures"); - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "rank_eval_response", - true, - a -> new RankEvalResponse( - (Double) a[0], - ((List) a[1]).stream().collect(Collectors.toMap(EvalQueryQuality::getId, Function.identity())), - ((List>) a[2]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) - ) - ); - static { - PARSER.declareDouble(ConstructingObjectParser.constructorArg(), EvalQueryQuality.METRIC_SCORE_FIELD); - PARSER.declareNamedObjects( - ConstructingObjectParser.optionalConstructorArg(), - (p, c, n) -> EvalQueryQuality.fromXContent(p, n), - DETAILS_FIELD - ); - PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), (p, c, n) -> { - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.nextToken(), p); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); - Tuple tuple = new Tuple<>(n, ElasticsearchException.failureFromXContent(p)); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); - return tuple; - }, FAILURES_FIELD); - - } - - public static RankEvalResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); - } } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index d4ec7ba9b9ef5..d4d58c3c0ae71 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -21,10 +21,14 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -39,7 +43,9 @@ import java.util.List; import java.util.Map; import java.util.OptionalInt; +import java.util.function.Function; import java.util.function.Predicate; +import java.util.stream.Collectors; import static java.util.Collections.singleton; import static org.elasticsearch.common.xcontent.XContentHelper.toXContent; @@ -49,6 +55,32 @@ public class RankEvalResponseTests extends ESTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "rank_eval_response", + true, + a -> new RankEvalResponse( + (Double) a[0], + ((List) a[1]).stream().collect(Collectors.toMap(EvalQueryQuality::getId, Function.identity())), + ((List>) a[2]).stream().collect(Collectors.toMap(Tuple::v1, Tuple::v2)) + ) + ); + static { + 
PARSER.declareDouble(ConstructingObjectParser.constructorArg(), EvalQueryQuality.METRIC_SCORE_FIELD); + PARSER.declareNamedObjects( + ConstructingObjectParser.optionalConstructorArg(), + (p, c, n) -> EvalQueryQuality.fromXContent(p, n), + new ParseField("details") + ); + PARSER.declareNamedObjects(ConstructingObjectParser.optionalConstructorArg(), (p, c, n) -> { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, p.nextToken(), p); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); + Tuple tuple = new Tuple<>(n, ElasticsearchException.failureFromXContent(p)); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); + return tuple; + }, new ParseField("failures")); + } + private static final Exception[] RANDOM_EXCEPTIONS = new Exception[] { new ClusterBlockException(singleton(NoMasterBlockService.NO_MASTER_BLOCK_WRITES)), new CircuitBreakingException("Data too large", 123, 456, CircuitBreaker.Durability.PERMANENT), @@ -117,7 +149,7 @@ public void testXContentParsing() throws IOException { BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); RankEvalResponse parsedItem; try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - parsedItem = RankEvalResponse.fromXContent(parser); + parsedItem = PARSER.apply(parser, null); assertNull(parser.nextToken()); } assertNotSame(testItem, parsedItem); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java index 36fe688b396da..f32cd3f7e0197 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponse.java @@ -15,7 +15,6 @@ import org.elasticsearch.script.ScriptLanguagesInfo; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -37,10 +36,6 @@ public void writeTo(StreamOutput out) throws IOException { info.writeTo(out); } - public static GetScriptLanguageResponse fromXContent(XContentParser parser) throws IOException { - return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser)); - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java index 0202a0355abb6..24604a3977096 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponse.java @@ -13,47 +13,19 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.StoredScriptSource; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import 
org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; - public class GetStoredScriptResponse extends ActionResponse implements ToXContentObject { public static final ParseField _ID_PARSE_FIELD = new ParseField("_id"); public static final ParseField FOUND_PARSE_FIELD = new ParseField("found"); public static final ParseField SCRIPT = new ParseField("script"); - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "GetStoredScriptResponse", - true, - (a, c) -> { - String id = (String) a[0]; - boolean found = (Boolean) a[1]; - StoredScriptSource scriptSource = (StoredScriptSource) a[2]; - return found ? new GetStoredScriptResponse(id, scriptSource) : new GetStoredScriptResponse(id, null); - } - ); - - static { - PARSER.declareField(constructorArg(), (p, c) -> p.text(), _ID_PARSE_FIELD, ObjectParser.ValueType.STRING); - PARSER.declareField(constructorArg(), (p, c) -> p.booleanValue(), FOUND_PARSE_FIELD, ObjectParser.ValueType.BOOLEAN); - PARSER.declareField( - optionalConstructorArg(), - (p, c) -> StoredScriptSource.fromXContent(p, true), - SCRIPT, - ObjectParser.ValueType.OBJECT - ); - } - private String id; private StoredScriptSource source; @@ -103,10 +75,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static GetStoredScriptResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - @Override public void writeTo(StreamOutput out) throws IOException { if (source == null) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index e91329e810397..4f548e227dcfb 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -15,30 +15,20 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.core.RestApiVersion; -import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.rest.action.document.RestMultiGetAction; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; -import java.util.ArrayList; import java.util.Iterator; -import java.util.List; public class MultiGetResponse extends ActionResponse implements Iterable, ToXContentObject { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MultiGetResponse.class); - private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); - private static final ParseField ID = new ParseField("_id"); - private static final ParseField ERROR = new ParseField("error"); - private static final ParseField DOCS = new ParseField("docs"); + static final ParseField INDEX = new ParseField("_index"); + static final 
ParseField ID = new ParseField("_id"); + static final ParseField DOCS = new ParseField("docs"); /** * Represents a failure. @@ -151,80 +141,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static MultiGetResponse fromXContent(XContentParser parser) throws IOException { - String currentFieldName = null; - List items = new ArrayList<>(); - for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { - switch (token) { - case FIELD_NAME: - currentFieldName = parser.currentName(); - break; - case START_ARRAY: - if (DOCS.getPreferredName().equals(currentFieldName)) { - for (token = parser.nextToken(); token != Token.END_ARRAY; token = parser.nextToken()) { - if (token == Token.START_OBJECT) { - items.add(parseItem(parser)); - } - } - } - break; - default: - // If unknown tokens are encounter then these should be ignored, because - // this is parsing logic on the client side. - break; - } - } - return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); - } - - private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { - String currentFieldName = null; - String index = null; - String id = null; - ElasticsearchException exception = null; - GetResult getResult = null; - for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { - switch (token) { - case FIELD_NAME: - currentFieldName = parser.currentName(); - if (INDEX.match(currentFieldName, parser.getDeprecationHandler()) == false - && ID.match(currentFieldName, parser.getDeprecationHandler()) == false - && ERROR.match(currentFieldName, parser.getDeprecationHandler()) == false) { - getResult = GetResult.fromXContentEmbedded(parser, index, id); - } - break; - case VALUE_STRING: - if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { - index = parser.text(); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); - } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { - id = parser.text(); - } - break; - case START_OBJECT: - if (ERROR.match(currentFieldName, parser.getDeprecationHandler())) { - exception = ElasticsearchException.fromXContent(parser); - } - break; - default: - // If unknown tokens are encounter then these should be ignored, because - // this is parsing logic on the client side. 
- break; - } - if (getResult != null) { - break; - } - } - - if (exception != null) { - return new MultiGetItemResponse(null, new Failure(index, id, exception)); - } else { - GetResponse getResponse = new GetResponse(getResult); - return new MultiGetItemResponse(getResponse, null); - } - } - @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(responses); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java index f5dcb83fa36fc..396a5b63b3cd5 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/SimulatePipelineResponse.java @@ -8,80 +8,22 @@ package org.elasticsearch.action.ingest; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject { private String pipelineId; private boolean verbose; private List results; - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "simulate_pipeline_response", - true, - a -> { - List results = (List) a[0]; - boolean verbose = false; - if (results.size() > 0) { - if (results.get(0) instanceof SimulateDocumentVerboseResult) { - verbose = true; - } - } - return new SimulatePipelineResponse(null, verbose, results); - } - ); - static { - PARSER.declareObjectArray(constructorArg(), (parser, context) -> { - Token token = parser.currentToken(); - ensureExpectedToken(Token.START_OBJECT, token, parser); - SimulateDocumentResult result = null; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - ensureExpectedToken(Token.FIELD_NAME, token, parser); - String fieldName = parser.currentName(); - token = parser.nextToken(); - if (token == Token.START_ARRAY) { - if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) { - List results = new ArrayList<>(); - while ((token = parser.nextToken()) == Token.START_OBJECT) { - results.add(SimulateProcessorResult.fromXContent(parser)); - } - ensureExpectedToken(Token.END_ARRAY, token, parser); - result = new SimulateDocumentVerboseResult(results); - } else { - parser.skipChildren(); - } - } else if (token.equals(Token.START_OBJECT)) { - switch (fieldName) { - case WriteableIngestDocument.DOC_FIELD -> result = new SimulateDocumentBaseResult( - WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() - ); - case "error" -> result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); - default -> parser.skipChildren(); - } - } // else it is a value skip it - } - assert result != null; - return result; - 
}, new ParseField(Fields.DOCUMENTS)); - } - public SimulatePipelineResponse(StreamInput in) throws IOException { super(in); this.pipelineId = in.readOptionalString(); @@ -136,10 +78,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static SimulatePipelineResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - static final class Fields { static final String DOCUMENTS = "docs"; } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java index fedcbc1a076d0..5bdeac75989a8 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java @@ -8,23 +8,14 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.reindex.BulkByScrollTask.Status; -import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; import java.util.ArrayList; @@ -33,7 +24,6 @@ import static java.lang.Math.max; import static java.lang.Math.min; import static java.util.Objects.requireNonNull; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.core.TimeValue.timeValueNanos; /** @@ -46,23 +36,9 @@ public class BulkByScrollResponse extends ActionResponse implements ToXContentFr private List searchFailures; private boolean timedOut; - private static final String TOOK_FIELD = "took"; - private static final String TIMED_OUT_FIELD = "timed_out"; - private static final String FAILURES_FIELD = "failures"; - - @SuppressWarnings("unchecked") - private static final ObjectParser PARSER = new ObjectParser<>( - "bulk_by_scroll_response", - true, - BulkByScrollResponseBuilder::new - ); - static { - PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(TOOK_FIELD)); - PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(TIMED_OUT_FIELD)); - PARSER.declareObjectArray(BulkByScrollResponseBuilder::setFailures, (p, c) -> parseFailure(p), new ParseField(FAILURES_FIELD)); - // since the result of BulkByScrollResponse.Status are mixed we also parse that in this - Status.declareFields(PARSER); - } + static final String TOOK_FIELD = "took"; + static final String TIMED_OUT_FIELD = "timed_out"; + static final String FAILURES_FIELD = "failures"; public BulkByScrollResponse(StreamInput in) throws IOException { super(in); @@ -195,7 +171,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TOOK_FIELD, took.millis()); builder.field(TIMED_OUT_FIELD, timedOut); 
status.innerXContent(builder, params); - builder.startArray("failures"); + builder.startArray(FAILURES_FIELD); for (Failure failure : bulkFailures) { builder.startObject(); failure.toXContent(builder, params); @@ -208,59 +184,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static BulkByScrollResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null).buildResponse(); - } - - private static Object parseFailure(XContentParser parser) throws IOException { - ensureExpectedToken(Token.START_OBJECT, parser.currentToken(), parser); - Token token; - String index = null; - String id = null; - Integer status = null; - Integer shardId = null; - String nodeId = null; - ElasticsearchException bulkExc = null; - ElasticsearchException searchExc = null; - while ((token = parser.nextToken()) != Token.END_OBJECT) { - ensureExpectedToken(Token.FIELD_NAME, token, parser); - String name = parser.currentName(); - token = parser.nextToken(); - if (token == Token.START_ARRAY) { - parser.skipChildren(); - } else if (token == Token.START_OBJECT) { - switch (name) { - case SearchFailure.REASON_FIELD -> searchExc = ElasticsearchException.fromXContent(parser); - case Failure.CAUSE_FIELD -> bulkExc = ElasticsearchException.fromXContent(parser); - default -> parser.skipChildren(); - } - } else if (token == Token.VALUE_STRING) { - switch (name) { - // This field is the same as SearchFailure.index - case Failure.INDEX_FIELD -> index = parser.text(); - case Failure.ID_FIELD -> id = parser.text(); - case SearchFailure.NODE_FIELD -> nodeId = parser.text(); - } - } else if (token == Token.VALUE_NUMBER) { - switch (name) { - case Failure.STATUS_FIELD -> status = parser.intValue(); - case SearchFailure.SHARD_FIELD -> shardId = parser.intValue(); - } - } - } - if (bulkExc != null) { - return new Failure(index, id, bulkExc, RestStatus.fromCode(status)); - } else if (searchExc != null) { - if (status == null) { - return new SearchFailure(searchExc, index, shardId, nodeId); - } else { - return new SearchFailure(searchExc, index, shardId, nodeId, RestStatus.fromCode(status)); - } - } else { - throw new ElasticsearchParseException("failed to parse failures array. 
At least one of {reason,cause} must be present"); - } - } - @Override public String toString() { StringBuilder builder = new StringBuilder(); diff --git a/server/src/main/java/org/elasticsearch/rest/RestResponse.java b/server/src/main/java/org/elasticsearch/rest/RestResponse.java index 55adc67bf18e6..2bb7bdc41bcf9 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestResponse.java +++ b/server/src/main/java/org/elasticsearch/rest/RestResponse.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -21,7 +20,6 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; @@ -33,14 +31,13 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER; public final class RestResponse { public static final String TEXT_CONTENT_TYPE = "text/plain; charset=UTF-8"; - private static final String STATUS = "status"; + static final String STATUS = "status"; private static final Logger SUPPRESSED_ERROR_LOGGER = LogManager.getLogger("rest.suppressed"); @@ -189,42 +186,6 @@ static RestResponse createSimpleErrorResponse(RestChannel channel, RestStatus st ); } - public static ElasticsearchStatusException errorFromXContent(XContentParser parser) throws IOException { - XContentParser.Token token = parser.nextToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - - ElasticsearchException exception = null; - RestStatus status = null; - - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } - if (STATUS.equals(currentFieldName)) { - if (token != XContentParser.Token.FIELD_NAME) { - ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); - status = RestStatus.fromCode(parser.intValue()); - } - } else { - exception = ElasticsearchException.failureFromXContent(parser); - } - } - - if (exception == null) { - throw new IllegalStateException("Failed to parse elasticsearch status exception: no exception was found"); - } - - ElasticsearchStatusException result = new ElasticsearchStatusException(exception.getMessage(), status, exception.getCause()); - for (String header : exception.getHeaderKeys()) { - result.addHeader(header, exception.getHeader(header)); - } - for (String metadata : exception.getMetadataKeys()) { - result.addMetadata(metadata, exception.getMetadata(metadata)); - } - return result; - } - public void copyHeaders(ElasticsearchException ex) { Set headerKeySet = ex.getHeaderKeys(); if (customHeaders == null) { diff --git a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java index 75ab5db982235..04a0b3434814a 100644 --- 
a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java +++ b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java @@ -18,22 +18,17 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.TreeMap; -import java.util.stream.Collectors; /** * Holds the results of the most recent attempt to migrate system indices. Updated by {@link SystemIndexMigrator} as it finishes each @@ -43,25 +38,7 @@ public class FeatureMigrationResults implements Metadata.Custom { public static final String TYPE = "system_index_migration"; public static final TransportVersion MIGRATION_ADDED_VERSION = TransportVersions.V_8_0_0; - private static final ParseField RESULTS_FIELD = new ParseField("results"); - - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, a -> { - final Map statuses = ((List>) a[0]).stream() - .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); - return new FeatureMigrationResults(statuses); - }); - - static { - PARSER.declareNamedObjects( - ConstructingObjectParser.constructorArg(), - (p, c, n) -> new Tuple<>(n, SingleFeatureMigrationResult.fromXContent(p)), - v -> { - throw new IllegalArgumentException("ordered " + RESULTS_FIELD.getPreferredName() + " are not supported"); - }, - RESULTS_FIELD - ); - } + static final ParseField RESULTS_FIELD = new ParseField("results"); private final Map featureStatuses; @@ -83,10 +60,6 @@ public Iterator toXContentChunked(ToXContent.Params ignore return ChunkedToXContentHelper.xContentValuesMap(RESULTS_FIELD.getPreferredName(), featureStatuses); } - public static FeatureMigrationResults fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - /** * Gets a map of feature name to that feature's status. Only contains features which have either been migrated successfully or * failed to migrate. diff --git a/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java index db1c325dfbb7f..24ed1943ed04e 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java @@ -14,11 +14,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -27,10 +25,9 @@ * Holds the results of migrating a single feature. See also {@link FeatureMigrationResults}. 
*/ public class SingleFeatureMigrationResult implements SimpleDiffable, Writeable, ToXContentObject { - private static final String NAME = "feature_migration_status"; - private static final ParseField SUCCESS_FIELD = new ParseField("successful"); - private static final ParseField FAILED_INDEX_NAME_FIELD = new ParseField("failed_index"); - private static final ParseField EXCEPTION_FIELD = new ParseField("exception"); + static final ParseField SUCCESS_FIELD = new ParseField("successful"); + static final ParseField FAILED_INDEX_NAME_FIELD = new ParseField("failed_index"); + static final ParseField EXCEPTION_FIELD = new ParseField("exception"); private final boolean successful; @Nullable @@ -38,23 +35,7 @@ public class SingleFeatureMigrationResult implements SimpleDiffable PARSER = new ConstructingObjectParser<>( - NAME, - a -> new SingleFeatureMigrationResult((boolean) a[0], (String) a[1], (Exception) a[2]) - ); - - static { - PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SUCCESS_FIELD); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_INDEX_NAME_FIELD); - PARSER.declareObject( - ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> ElasticsearchException.fromXContent(p), - EXCEPTION_FIELD - ); - } - - private SingleFeatureMigrationResult(boolean successful, String failedIndexName, Exception exception) { + SingleFeatureMigrationResult(boolean successful, String failedIndexName, Exception exception) { this.successful = successful; if (successful == false) { Objects.requireNonNull(failedIndexName, "failed index name must be present for failed feature migration statuses"); @@ -75,10 +56,6 @@ private SingleFeatureMigrationResult(boolean successful, String failedIndexName, } } - public static SingleFeatureMigrationResult fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - /** * Creates a record indicating that migration succeeded. 
*/ diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java index ff70e7e6756ed..f8d3871fbfa8f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetScriptLanguageResponseTests.java @@ -38,7 +38,7 @@ protected GetScriptLanguageResponse createTestInstance() { @Override protected GetScriptLanguageResponse doParseInstance(XContentParser parser) throws IOException { - return GetScriptLanguageResponse.fromXContent(parser); + return new GetScriptLanguageResponse(ScriptLanguagesInfo.fromXContent(parser)); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java index 500080fa9f118..05820c071052c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/storedscripts/GetStoredScriptResponseTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.StoredScriptSource; import org.elasticsearch.test.AbstractXContentSerializingTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; @@ -20,11 +22,41 @@ import java.util.Map; import java.util.function.Predicate; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + public class GetStoredScriptResponseTests extends AbstractXContentSerializingTestCase { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "GetStoredScriptResponse", + true, + (a, c) -> { + String id = (String) a[0]; + boolean found = (Boolean) a[1]; + StoredScriptSource scriptSource = (StoredScriptSource) a[2]; + return found ? 
new GetStoredScriptResponse(id, scriptSource) : new GetStoredScriptResponse(id, null); + } + ); + + static { + PARSER.declareField(constructorArg(), (p, c) -> p.text(), GetStoredScriptResponse._ID_PARSE_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField( + constructorArg(), + (p, c) -> p.booleanValue(), + GetStoredScriptResponse.FOUND_PARSE_FIELD, + ObjectParser.ValueType.BOOLEAN + ); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> StoredScriptSource.fromXContent(p, true), + GetStoredScriptResponse.SCRIPT, + ObjectParser.ValueType.OBJECT + ); + } + @Override protected GetStoredScriptResponse doParseInstance(XContentParser parser) throws IOException { - return GetStoredScriptResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java index b858456d78075..fd005c450e80f 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetResponseTests.java @@ -7,15 +7,21 @@ */ package org.elasticsearch.action.get; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.rest.action.document.RestMultiGetAction; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -23,6 +29,12 @@ public class MultiGetResponseTests extends ESTestCase { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MultiGetResponse.class); + + private static final ParseField TYPE = new ParseField("_type"); + + private static final ParseField ERROR = new ParseField("error"); + public void testFromXContent() throws IOException { for (int runs = 0; runs < 20; runs++) { MultiGetResponse expected = createTestInstance(); @@ -30,7 +42,7 @@ public void testFromXContent() throws IOException { BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, false); MultiGetResponse parsed; try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { - parsed = MultiGetResponse.fromXContent(parser); + parsed = parseInstance(parser); assertNull(parser.nextToken()); } assertNotSame(expected, parsed); @@ -77,4 +89,78 @@ private static MultiGetResponse createTestInstance() { return new MultiGetResponse(items); } + public static MultiGetResponse parseInstance(XContentParser parser) throws IOException { + String currentFieldName = null; + List items = new ArrayList<>(); + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + break; + case START_ARRAY: + if (MultiGetResponse.DOCS.getPreferredName().equals(currentFieldName)) { + for (token = parser.nextToken(); token != XContentParser.Token.END_ARRAY; token = 
parser.nextToken()) { + if (token == XContentParser.Token.START_OBJECT) { + items.add(parseItem(parser)); + } + } + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side. + break; + } + } + return new MultiGetResponse(items.toArray(new MultiGetItemResponse[0])); + } + + private static MultiGetItemResponse parseItem(XContentParser parser) throws IOException { + String currentFieldName = null; + String index = null; + String id = null; + ElasticsearchException exception = null; + GetResult getResult = null; + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + switch (token) { + case FIELD_NAME: + currentFieldName = parser.currentName(); + if (MultiGetResponse.INDEX.match(currentFieldName, parser.getDeprecationHandler()) == false + && MultiGetResponse.ID.match(currentFieldName, parser.getDeprecationHandler()) == false + && ERROR.match(currentFieldName, parser.getDeprecationHandler()) == false) { + getResult = GetResult.fromXContentEmbedded(parser, index, id); + } + break; + case VALUE_STRING: + if (MultiGetResponse.INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + index = parser.text(); + } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + deprecationLogger.compatibleCritical("mget_with_types", RestMultiGetAction.TYPES_DEPRECATION_MESSAGE); + } else if (MultiGetResponse.ID.match(currentFieldName, parser.getDeprecationHandler())) { + id = parser.text(); + } + break; + case START_OBJECT: + if (ERROR.match(currentFieldName, parser.getDeprecationHandler())) { + exception = ElasticsearchException.fromXContent(parser); + } + break; + default: + // If unknown tokens are encountered then these should be ignored, because + // this is parsing logic on the client side.
+ break; + } + if (getResult != null) { + break; + } + } + + if (exception != null) { + return new MultiGetItemResponse(null, new MultiGetResponse.Failure(index, id, exception)); + } else { + GetResponse getResponse = new GetResponse(getResult); + return new MultiGetItemResponse(getResponse, null); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java index 4954406c14db0..1ec54638f9687 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/SimulatePipelineResponseTests.java @@ -8,9 +8,12 @@ package org.elasticsearch.action.ingest; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -21,13 +24,65 @@ import java.util.function.Predicate; import java.util.function.Supplier; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.nullValue; public class SimulatePipelineResponseTests extends AbstractXContentTestCase { + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "simulate_pipeline_response", + true, + a -> { + List results = (List) a[0]; + boolean verbose = false; + if (results.size() > 0) { + if (results.get(0) instanceof SimulateDocumentVerboseResult) { + verbose = true; + } + } + return new SimulatePipelineResponse(null, verbose, results); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), (parser, context) -> { + XContentParser.Token token = parser.currentToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + SimulateDocumentResult result = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String fieldName = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY) { + if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) { + List results = new ArrayList<>(); + while ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) { + results.add(SimulateProcessorResult.fromXContent(parser)); + } + ensureExpectedToken(XContentParser.Token.END_ARRAY, token, parser); + result = new SimulateDocumentVerboseResult(results); + } else { + parser.skipChildren(); + } + } else if (token.equals(XContentParser.Token.START_OBJECT)) { + switch (fieldName) { + case WriteableIngestDocument.DOC_FIELD -> result = new SimulateDocumentBaseResult( + WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument() + ); + case "error" -> result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser)); + default -> parser.skipChildren(); + } + } // 
else it is a value, skip it + } + assert result != null; + return result; + }, new ParseField(SimulatePipelineResponse.Fields.DOCUMENTS)); + } + public void testSerialization() throws IOException { boolean isVerbose = randomBoolean(); String id = randomBoolean() ? randomAlphaOfLengthBetween(1, 10) : null; @@ -118,7 +173,7 @@ protected SimulatePipelineResponse createTestInstance() { @Override protected SimulatePipelineResponse doParseInstance(XContentParser parser) { - return SimulatePipelineResponse.fromXContent(parser); + return PARSER.apply(parser, null); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index ef32360722474..f8162eb987226 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; @@ -16,7 +17,10 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.reindex.BulkByScrollTask.Status; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -28,10 +32,77 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.apache.lucene.tests.util.TestUtil.randomSimpleString; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.elasticsearch.core.TimeValue.timeValueMillis; public class BulkByScrollResponseTests extends AbstractXContentTestCase { + private static final ObjectParser PARSER = new ObjectParser<>( + "bulk_by_scroll_response", + true, + BulkByScrollResponseBuilder::new + ); + static { + PARSER.declareLong(BulkByScrollResponseBuilder::setTook, new ParseField(BulkByScrollResponse.TOOK_FIELD)); + PARSER.declareBoolean(BulkByScrollResponseBuilder::setTimedOut, new ParseField(BulkByScrollResponse.TIMED_OUT_FIELD)); + PARSER.declareObjectArray( + BulkByScrollResponseBuilder::setFailures, + (p, c) -> parseFailure(p), + new ParseField(BulkByScrollResponse.FAILURES_FIELD) + ); + // since the fields of BulkByScrollResponse.Status are mixed into the response, we also parse them in this parser + Status.declareFields(PARSER); + } + + private static Object parseFailure(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); + XContentParser.Token token; + String index = null; + String id = null; + Integer status = null; + Integer shardId = null; + String nodeId = null; + ElasticsearchException bulkExc = null; + ElasticsearchException searchExc = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser); + String name = parser.currentName(); + token = parser.nextToken(); + if (token == XContentParser.Token.START_ARRAY) { +
parser.skipChildren(); + } else if (token == XContentParser.Token.START_OBJECT) { + switch (name) { + case ScrollableHitSource.SearchFailure.REASON_FIELD -> searchExc = ElasticsearchException.fromXContent(parser); + case Failure.CAUSE_FIELD -> bulkExc = ElasticsearchException.fromXContent(parser); + default -> parser.skipChildren(); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + switch (name) { + // This field is the same as SearchFailure.index + case Failure.INDEX_FIELD -> index = parser.text(); + case Failure.ID_FIELD -> id = parser.text(); + case ScrollableHitSource.SearchFailure.NODE_FIELD -> nodeId = parser.text(); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + switch (name) { + case Failure.STATUS_FIELD -> status = parser.intValue(); + case ScrollableHitSource.SearchFailure.SHARD_FIELD -> shardId = parser.intValue(); + } + } + } + if (bulkExc != null) { + return new Failure(index, id, bulkExc, RestStatus.fromCode(status)); + } else if (searchExc != null) { + if (status == null) { + return new ScrollableHitSource.SearchFailure(searchExc, index, shardId, nodeId); + } else { + return new ScrollableHitSource.SearchFailure(searchExc, index, shardId, nodeId, RestStatus.fromCode(status)); + } + } else { + throw new ElasticsearchParseException("failed to parse failures array. At least one of {reason,cause} must be present"); + } + } + private boolean includeUpdated; private boolean includeCreated; private boolean testExceptions = randomBoolean(); @@ -160,7 +231,7 @@ protected BulkByScrollResponse createTestInstance() { @Override protected BulkByScrollResponse doParseInstance(XContentParser parser) throws IOException { - return BulkByScrollResponse.fromXContent(parser); + return PARSER.apply(parser, null).buildResponse(); } @Override diff --git a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java index 41710d6c1b76c..4125c9bb66b4f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestResponseTests.java @@ -49,6 +49,7 @@ import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; import static org.elasticsearch.ElasticsearchExceptionTests.assertDeepEquals; +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -420,7 +421,7 @@ public void testErrorToAndFromXContent() throws IOException { ElasticsearchException parsedError; try (XContentParser parser = createParser(xContentType.xContent(), response.content())) { - parsedError = RestResponse.errorFromXContent(parser); + parsedError = errorFromXContent(parser); assertNull(parser.nextToken()); } @@ -436,13 +437,49 @@ public void testNoErrorFromXContent() throws IOException { builder.endObject(); try (XContentParser parser = createParser(builder.contentType().xContent(), BytesReference.bytes(builder))) { - RestResponse.errorFromXContent(parser); + errorFromXContent(parser); } } }); assertEquals("Failed to parse elasticsearch status exception: no exception was found", e.getMessage()); } + private static ElasticsearchStatusException errorFromXContent(XContentParser parser) throws IOException { + XContentParser.Token token = parser.nextToken(); + ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + + ElasticsearchException 
exception = null; + RestStatus status = null; + + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } + if (RestResponse.STATUS.equals(currentFieldName)) { + if (token != XContentParser.Token.FIELD_NAME) { + ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, parser); + status = RestStatus.fromCode(parser.intValue()); + } + } else { + exception = ElasticsearchException.failureFromXContent(parser); + } + } + + if (exception == null) { + throw new IllegalStateException("Failed to parse elasticsearch status exception: no exception was found"); + } + + ElasticsearchStatusException result = new ElasticsearchStatusException(exception.getMessage(), status, exception.getCause()); + for (String header : exception.getHeaderKeys()) { + result.addHeader(header, exception.getHeader(header)); + } + for (String metadata : exception.getMetadataKeys()) { + result.addMetadata(metadata, exception.getMetadata(metadata)); + } + return result; + } + public void testResponseContentTypeUponException() throws Exception { String mediaType = XContentType.VND_JSON.toParsedMediaType() .responseContentTypeHeader( diff --git a/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java index 6dbae94492aaf..b7fff65a19b64 100644 --- a/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java +++ b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java @@ -8,17 +8,64 @@ package org.elasticsearch.upgrades; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ChunkedToXContentDiffableSerializationTestCase; +import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; public class FeatureMigrationResultsTests extends ChunkedToXContentDiffableSerializationTestCase { + private static final ConstructingObjectParser SINGLE_FEATURE_RESULT_PARSER = + new ConstructingObjectParser<>( + "feature_migration_status", + a -> new SingleFeatureMigrationResult((boolean) a[0], (String) a[1], (Exception) a[2]) + ); + + static { + SINGLE_FEATURE_RESULT_PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SingleFeatureMigrationResult.SUCCESS_FIELD); + SINGLE_FEATURE_RESULT_PARSER.declareString( + ConstructingObjectParser.optionalConstructorArg(), + SingleFeatureMigrationResult.FAILED_INDEX_NAME_FIELD + ); + SINGLE_FEATURE_RESULT_PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + SingleFeatureMigrationResult.EXCEPTION_FIELD + ); + } + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + FeatureMigrationResults.TYPE, + a -> { + final Map statuses = ((List>) a[0]).stream() + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + return new FeatureMigrationResults(statuses); + } + ); + + static { + PARSER.declareNamedObjects( + ConstructingObjectParser.constructorArg(), + (p, c, n) -> new Tuple<>(n, 
SINGLE_FEATURE_RESULT_PARSER.apply(p, c)), + v -> { + throw new IllegalArgumentException( + "ordered " + FeatureMigrationResults.RESULTS_FIELD.getPreferredName() + " are not supported" + ); + }, + FeatureMigrationResults.RESULTS_FIELD + ); + } + @Override protected FeatureMigrationResults createTestInstance() { return new FeatureMigrationResults(randomMap(0, 10, () -> new Tuple<>(randomAlphaOfLength(5), randomFeatureStatus()))); @@ -60,7 +107,7 @@ protected Writeable.Reader instanceReader() { @Override protected FeatureMigrationResults doParseInstance(XContentParser parser) throws IOException { - return FeatureMigrationResults.fromXContent(parser); + return PARSER.apply(parser, null); } @Override From 8739f9a66530c02c734555f4a0976120ca8808da Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Tue, 16 Jan 2024 12:46:07 +0000 Subject: [PATCH 43/95] Update example plugin for API changes in #103277 (#104357) --- .../example/resthandler/ExampleRestHandlerPlugin.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java index 59131731d25e1..e142ba80147e0 100644 --- a/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java +++ b/plugins/examples/rest-handler/src/main/java/org/elasticsearch/example/resthandler/ExampleRestHandlerPlugin.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -28,6 +29,7 @@ public class ExampleRestHandlerPlugin extends Plugin implements ActionPlugin { @Override public List getRestHandlers(final Settings settings, + final NamedWriteableRegistry namedWriteableRegistry, final RestController restController, final ClusterSettings clusterSettings, final IndexScopedSettings indexScopedSettings, From 0c98fb2a8ec4ab6a41d10dec3171e0c57a5589cf Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 16 Jan 2024 08:07:11 -0500 Subject: [PATCH 44/95] Simplify the int8_hnsw MIP yaml test (#104331) The test is unnecessarily complicated with its vectors. This simplifies the vectors and the test. We mainly care about extreme weirdness & server level failures. 
closes: https://github.com/elastic/elasticsearch/issues/104297 --- .../search.vectors/41_knn_search_byte_quantized.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml index 948a6e04a128b..433592a32f963 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/41_knn_search_byte_quantized.yml @@ -249,7 +249,7 @@ setup: id: "1" body: name: cow.jpg - vector: [230.0, 300.33, -34.8988, 15.555, -200.0] + vector: [1, 2, 3, 4, 5] - do: index: @@ -257,7 +257,7 @@ setup: id: "2" body: name: moose.jpg - vector: [-0.5, 10.0, -13, 14.8, 15.0] + vector: [1, 1, 1, 1, 1] - do: index: @@ -265,7 +265,7 @@ setup: id: "3" body: name: rabbit.jpg - vector: [0.5, 111.3, -13.0, 14.8, -156.0] + vector: [1, 2, 2, 2, 2] # We force merge into a single segment to make sure scores are more uniform # Each segment can have a different quantization error, which can affect scores and mip is especially sensitive to this @@ -286,7 +286,7 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [1, 2, 3, 4, 5] - length: {hits.hits: 3} @@ -303,7 +303,7 @@ setup: num_candidates: 3 k: 3 field: vector - query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + query_vector: [1, 2, 3, 4, 5] filter: { "term": { "name": "moose.jpg" } } From 9cc331c79bd1b66a780dfbd490cea170ea1ea4fb Mon Sep 17 00:00:00 2001 From: Michael Peterson Date: Tue, 16 Jan 2024 08:34:00 -0500 Subject: [PATCH 45/95] CCS with minimize_roundtrips performs incremental merges of each SearchResponse (#103134) * CCS with minimize_roundtrips performs incremental merges of each SearchResponse To help address the issue of slow-to-respond clusters in a cross-cluster search, async-search based CCS with minimize_roundtrips=true performs incremental merges of each SearchResponse as they come in from each cluster (including the local cluster). This means, any time the user calls GET _async_search/:id, they will now get search hits and/or aggregation results from any clusters that have finished so far, as well as any partial aggs from the local cluster (existing functionality). The `is_running` field in the async-search response should be used to determine whether at least one cluster has still not reported back its final results. The SearchResponses are collected by MutableSearchResponse. When a user requests an AsyncSearchResponse, if the final response (from onResponse) has not been received, then it will create a new SearchResponseMerger on the fly using the Supplier of SearchResponseMerger in the SearchTask. This is non-null only for CCS MRT=true. 
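Illustrative sketch (not part of this change) of how a consumer such as
MutableSearchResponse can use the new APIs to build an incremental merged
response on demand; 'task', 'responsesReceivedSoFar', and 'clusters' below are
hypothetical stand-ins for whatever the caller has in scope:

    // Minimal sketch, assuming a SearchTask 'task' plus the per-cluster
    // SearchResponses and the SearchResponse.Clusters object gathered so far.
    Supplier<SearchResponseMerger> mergerSupplier = task.getSearchResponseMergerSupplier();
    if (mergerSupplier != null) { // non-null only for CCS with minimize_roundtrips=true
        try (SearchResponseMerger merger = mergerSupplier.get()) { // the merger is Releasable
            for (SearchResponse clusterResponse : responsesReceivedSoFar) {
                merger.add(clusterResponse); // responses from clusters that have finished so far
            }
            // safe to call before all clusters have responded; yields partial results
            SearchResponse partial = merger.getMergedResponse(clusters);
            // ... serve 'partial' to the GET _async_search/:id caller (and decRef it when done) ...
        }
    }

A fresh merger is created per request on purpose: getMergedResponse is a
point-in-time merge of whatever has arrived, so a later request simply repeats
the merge with more complete inputs.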
--- docs/changelog/103134.yaml | 5 + .../action/search/SearchProgressListener.java | 17 + .../action/search/SearchResponseMerger.java | 13 +- .../action/search/SearchTask.java | 16 + .../action/search/TransportSearchAction.java | 12 +- .../search/SearchResponseMergerTests.java | 434 ++++++++++++++++++ .../search/TransportSearchActionTests.java | 151 +++++- .../xpack/search/AsyncSearchTask.java | 13 + .../xpack/search/MutableSearchResponse.java | 130 +++++- 9 files changed, 765 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/103134.yaml diff --git a/docs/changelog/103134.yaml b/docs/changelog/103134.yaml new file mode 100644 index 0000000000000..13bb0323645f5 --- /dev/null +++ b/docs/changelog/103134.yaml @@ -0,0 +1,5 @@ +pr: 103134 +summary: CCS with `minimize_roundtrips` performs incremental merges of each `SearchResponse` +area: Search +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java index 096f2606d3f02..f5d280a01257c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java @@ -104,6 +104,15 @@ protected void onFetchResult(int shardIndex) {} */ protected void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {} + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. + * @param searchResponse SearchResponse from cluster 'clusterAlias' + */ + protected void onClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) {} + final void notifyListShards( List shards, List skippedShards, @@ -167,6 +176,14 @@ final void notifyFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exc } } + final void notifyClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse searchResponse) { + try { + onClusterResponseMinimizeRoundtrips(clusterAlias, searchResponse); + } catch (Exception e) { + logger.warn(() -> "[" + clusterAlias + "] Failed to execute progress listener onResponseMinimizeRoundtrips", e); + } + } + static List buildSearchShards(List results) { return results.stream() .filter(Objects::nonNull) diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 1b616b9f3bc87..0586cbb9046dc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -65,7 +65,7 @@ // TODO it may make sense to integrate the remote clusters responses as a shard response in the initial search phase and ignore hits coming // from the remote clusters in the fetch phase. This would be identical to the removed QueryAndFetch strategy except that only the remote // cluster response would have the fetch results. 
-final class SearchResponseMerger implements Releasable { +public final class SearchResponseMerger implements Releasable { final int from; final int size; final int trackTotalHitsUpTo; @@ -98,7 +98,7 @@ final class SearchResponseMerger implements Releasable { * Merges currently happen at once when all responses are available and {@link #getMergedResponse(Clusters)} )} is called. * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. */ - void add(SearchResponse searchResponse) { + public void add(SearchResponse searchResponse) { assert searchResponse.getScrollId() == null : "merging scroll results is not supported"; searchResponse.mustIncRef(); searchResponses.add(searchResponse); @@ -109,10 +109,13 @@ int numResponses() { } /** - * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)} - * so that all responses are merged into a single one. + * Returns the merged response of all SearchResponses received so far. Can be called at any point, + * including when only some clusters have finished, in order to get "incremental" partial results. + * @param clusters The Clusters object for the search to report on the status of each cluster + * involved in the cross-cluster search + * @return merged response */ - SearchResponse getMergedResponse(Clusters clusters) { + public SearchResponse getMergedResponse(Clusters clusters) { // if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, // we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java index b7e8de3b97b03..3bf72313c4c21 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTask.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTask.java @@ -21,6 +21,7 @@ public class SearchTask extends CancellableTask { // generating description in a lazy way since source can be quite big private final Supplier descriptionSupplier; private SearchProgressListener progressListener = SearchProgressListener.NOOP; + private Supplier searchResponseMergerSupplier; // used for CCS minimize_roundtrips=true public SearchTask( long id, @@ -53,4 +54,19 @@ public final SearchProgressListener getProgressListener() { return progressListener; } + /** + * @return the Supplier of {@link SearchResponseMerger} attached to this task. Will be null + * for local-only search and cross-cluster searches with minimize_roundtrips=false. + */ + public Supplier getSearchResponseMergerSupplier() { + return searchResponseMergerSupplier; + } + + /** + * @param supplier Attach a Supplier of {@link SearchResponseMerger} to this task. 
+ * For use with CCS minimize_roundtrips=true + */ + public void setSearchResponseMergerSupplier(Supplier supplier) { + this.searchResponseMergerSupplier = supplier; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 727e576764102..9c80e55a6f49d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -362,6 +362,7 @@ void executeRequest( .notifyListShards(Collections.emptyList(), Collections.emptyList(), clusters, false, timeProvider); } ccsRemoteReduce( + task, parentTaskId, rewritten, localIndices, @@ -496,6 +497,7 @@ public static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { * Handles ccs_minimize_roundtrips=true */ static void ccsRemoteReduce( + SearchTask task, TaskId parentTaskId, SearchRequest searchRequest, OriginalIndices localIndices, @@ -532,7 +534,6 @@ static void ccsRemoteReduce( remoteClusterClient.search(ccsSearchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse searchResponse) { - // TODO: in CCS fail fast ticket we may need to fail the query if the cluster is marked as FAILED // overwrite the existing cluster entry with the updated one ccsClusterInfoUpdate(searchResponse, clusters, clusterAlias, skipUnavailable); Map profileResults = searchResponse.getProfileResults(); @@ -580,6 +581,9 @@ public void onFailure(Exception e) { timeProvider, aggReduceContextBuilder ); + task.setSearchResponseMergerSupplier( + () -> createSearchResponseMerger(searchRequest.source(), timeProvider, aggReduceContextBuilder) + ); final AtomicReference exceptions = new AtomicReference<>(); int totalClusters = remoteIndices.size() + (localIndices == null ? 
0 : 1); final CountDown countDown = new CountDown(totalClusters); @@ -602,6 +606,7 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, clusters, + task.getProgressListener(), listener ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient( @@ -619,6 +624,7 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, clusters, + task.getProgressListener(), listener ); SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest( @@ -759,6 +765,7 @@ private static ActionListener createCCSListener( AtomicReference exceptions, SearchResponseMerger searchResponseMerger, SearchResponse.Clusters clusters, + SearchProgressListener progressListener, ActionListener originalListener ) { return new CCSActionListener<>( @@ -771,9 +778,9 @@ private static ActionListener createCCSListener( ) { @Override void innerOnResponse(SearchResponse searchResponse) { - // TODO: in CCS fail fast ticket we may need to fail the query if the cluster gets marked as FAILED ccsClusterInfoUpdate(searchResponse, clusters, clusterAlias, skipUnavailable); searchResponseMerger.add(searchResponse); + progressListener.notifyClusterResponseMinimizeRoundtrips(clusterAlias, searchResponse); } @Override @@ -1494,7 +1501,6 @@ public final void onFailure(Exception e) { if (cluster != null) { ccsClusterInfoUpdate(f, clusters, clusterAlias, true); } - // skippedClusters.incrementAndGet(); } else { if (cluster != null) { ccsClusterInfoUpdate(f, clusters, clusterAlias, false); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index e81d7a2246e03..0d34634df5ec4 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -60,6 +60,7 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.test.InternalAggregationTestCase.emptyReduceContextBuilder; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -658,6 +659,7 @@ public void testMergeAggs() throws InterruptedException { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); + try { addResponse(searchResponseMerger, searchResponse); } finally { @@ -820,6 +822,7 @@ public void testMergeSearchHits() throws InterruptedException { ShardSearchFailure.EMPTY_ARRAY, SearchResponseTests.randomClusters() ); + try { addResponse(searchResponseMerger, searchResponse); } finally { @@ -1119,6 +1122,437 @@ private static SearchHit[] randomSearchHitArray( return hits; } + /** + * Tests the partial results scenario used by MutableSearchResponse when + * doing cross-cluster search with minimize_roundtrips=true + */ + public void testPartialAggsMixedWithFullResponses() { + String maxAggName = "max123"; + String rangeAggName = "range123"; + + // partial aggs from local cluster (no search hits) + double value = 33.33; + int count = 33; + SearchResponse searchResponsePartialAggs = new SearchResponse( + SearchHits.empty(new TotalHits(0L, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), Float.NaN), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + 2, + 2, + 0, + 33, + 
ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + // full response from remote1 remote cluster + value = 44.44; + count = 44; + String clusterAlias = "remote1"; + int total = 3; + int successful = 2; + int skipped = 1; + Index[] indices = new Index[] { new Index("foo_idx", "1bba9f5b-c5a1-4664-be1b-26be590c1aff") }; + SearchResponse searchResponseRemote1 = new SearchResponse( + createSimpleDeterministicSearchHits(clusterAlias, indices), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + total, + successful, + skipped, + 44, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + // full response from remote2 remote cluster + value = 55.55; + count = 55; + clusterAlias = "remote2"; + total = 3; + successful = 2; + skipped = 1; + indices = new Index[] { new Index("foo_idx", "ae024679-097a-4a27-abf8-403f1e9189de") }; + SearchResponse searchResponseRemote2 = new SearchResponse( + createSimpleDeterministicSearchHits(clusterAlias, indices), + createDeterminsticAggregation(maxAggName, rangeAggName, value, count), + null, + false, + null, + null, + 1, + null, + total, + successful, + skipped, + 55, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + + SearchResponse.Clusters clusters = SearchResponseTests.createCCSClusterObject(3, 2, true, 2, 1, 0, 0, new ShardSearchFailure[0]); + + // merge partial aggs with remote1, check, then merge in remote2, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) + ) + ) { + searchResponseMerger.add(searchResponsePartialAggs); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(2L)); // should be 2 hits from remote1 + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + double expectedMaxValue = 44.44; // value from remote1 + long expectedBucketsDocCount = 33 + 44; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + + searchResponseMerger.add(searchResponseRemote2); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + 
"sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + } + + // merge remote1 and remote2, no partial aggs, check, then merge in partial aggs from local, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) + ) + ) { + searchResponseMerger.add(searchResponseRemote2); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 44 + 55; // missing 33 from local partial aggs + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + + searchResponseMerger.add(searchResponsePartialAggs); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + 
assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; // contributions from all 3 search responses + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } + } + } + + private SearchHits createSimpleDeterministicSearchHits(String clusterAlias, Index[] indices) { + TotalHits totalHits = new TotalHits(2, TotalHits.Relation.EQUAL_TO); + final int numDocs = (int) totalHits.value; + int scoreFactor = 1; + float maxScore = numDocs; + int numFields = 1; + SortField[] sortFields = new SortField[numFields]; + sortFields[0] = SortField.FIELD_SCORE; + PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); + SearchHit[] hits = deterministicSearchHitArray(numDocs, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); + + SearchHits searchHits = new SearchHits( + hits, + totalHits, + maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, + sortFields, + null, + null + ); + return searchHits; + } + + private static InternalAggregations createDeterminsticAggregation(String maxAggName, String rangeAggName, double value, int count) { + Max max = new Max(maxAggName, value, DocValueFormat.RAW, Collections.emptyMap()); + InternalDateRange.Factory factory = new InternalDateRange.Factory(); + InternalDateRange.Bucket bucket = factory.createBucket( + "bucket", + 0D, + 10000D, + count, + InternalAggregations.EMPTY, + false, + DocValueFormat.RAW + ); + + InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); + InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); + return aggs; + } + + private static SearchHit[] deterministicSearchHitArray( + int numDocs, + String clusterAlias, + Index[] indices, + float maxScore, + int scoreFactor, + SortField[] sortFields, + PriorityQueue priorityQueue + ) { + SearchHit[] hits = new SearchHit[numDocs]; + + int[] sortFieldFactors = new int[sortFields == null ? 
0 : sortFields.length]; + for (int j = 0; j < sortFieldFactors.length; j++) { + sortFieldFactors[j] = 1; + } + + for (int j = 0; j < numDocs; j++) { + ShardId shardId = new ShardId(randomFrom(indices), j); + SearchShardTarget shardTarget = new SearchShardTarget("abc123", shardId, clusterAlias); + SearchHit hit = new SearchHit(j); + + float score = Float.NaN; + if (Float.isNaN(maxScore) == false) { + score = (maxScore - j) * scoreFactor; + hit.score(score); + } + + hit.shard(shardTarget); + if (sortFields != null) { + Object[] rawSortValues = new Object[sortFields.length]; + DocValueFormat[] docValueFormats = new DocValueFormat[sortFields.length]; + for (int k = 0; k < sortFields.length; k++) { + SortField sortField = sortFields[k]; + if (sortField == SortField.FIELD_SCORE) { + hit.score(score); + rawSortValues[k] = score; + } else { + rawSortValues[k] = sortField.getReverse() ? numDocs * sortFieldFactors[k] - j : j; + } + docValueFormats[k] = DocValueFormat.RAW; + } + hit.sortValues(rawSortValues, docValueFormats); + } + hits[j] = hit; + priorityQueue.add(hit); + } + return hits; + } + private static Map randomRealisticIndices(int numIndices, int numClusters) { String[] indicesNames = new String[numIndices]; for (int i = 0; i < numIndices; i++) { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index fea6e39ea881b..2271821fc07da 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -509,8 +509,12 @@ public void testCCSRemoteReduceMergeFails() throws Exception { ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -566,6 +570,7 @@ public void testCCSRemoteReduce() throws Exception { service.start(); service.acceptIncomingRequests(); RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + // using from: 0 and size: 10 { SearchRequest searchRequest = new SearchRequest(); final CountDownLatch latch = new CountDownLatch(1); @@ -578,8 +583,11 @@ public void testCCSRemoteReduce() throws Exception { }), latch ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -617,6 +625,93 @@ public void testCCSRemoteReduce() throws Exception { searchResponse.decRef(); } } + + // using from: 5 and size: 6 + { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().from(5).size(6); + SearchRequest searchRequest = new SearchRequest(new String[] { "*", "*:*" }, sourceBuilder); + final CountDownLatch latch = new CountDownLatch(1); + SetOnce>> setOnce = new SetOnce<>(); + final SetOnce response = new SetOnce<>(); + LatchedActionListener listener = new LatchedActionListener<>( + ActionTestUtils.assertNoFailureListener(newValue -> { + newValue.incRef(); + response.set(newValue); + }), + latch + ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task 
= new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); + TransportSearchAction.ccsRemoteReduce( + task, + parentTaskId, + searchRequest, + localIndices, + remoteIndicesByCluster, + new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), + timeProvider, + emptyReduceContextBuilder(), + remoteClusterService, + threadPool, + listener, + (r, l) -> setOnce.set(Tuple.tuple(r, l)) + ); + if (localIndices == null) { + assertNull(setOnce.get()); + } else { + Tuple> tuple = setOnce.get(); + assertEquals("", tuple.v1().getLocalClusterAlias()); + assertThat(tuple.v2(), instanceOf(TransportSearchAction.CCSActionListener.class)); + resolveWithEmptySearchResponse(tuple); + } + awaitLatch(latch, 5, TimeUnit.SECONDS); + + SearchResponse searchResponse = response.get(); + try { + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SKIPPED)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.RUNNING)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.PARTIAL)); + assertEquals(0, searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.FAILED)); + assertEquals(totalClusters, searchResponse.getClusters().getTotal()); + assertEquals( + totalClusters, + searchResponse.getClusters().getClusterStateCount(SearchResponse.Cluster.Status.SUCCESSFUL) + ); + assertEquals(totalClusters == 1 ? 1 : totalClusters + 1, searchResponse.getNumReducePhases()); + } finally { + searchResponse.decRef(); + } + } + + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } + } + } + + public void testCCSRemoteReduceWhereRemoteClustersFail() throws Exception { + int numClusters = randomIntBetween(1, 10); + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + Settings settings = builder.build(); + boolean local = randomBoolean(); + OriginalIndices localIndices = local ? 
new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + try ( + MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ) + ) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); { SearchRequest searchRequest = new SearchRequest(); searchRequest.preference("index_not_found"); @@ -627,8 +722,12 @@ public void testCCSRemoteReduce() throws Exception { ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -655,6 +754,37 @@ public void testCCSRemoteReduce() throws Exception { assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); } + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } + } + } + + public void testCCSRemoteReduceWithDisconnectedRemoteClusters() throws Exception { + int numClusters = randomIntBetween(1, 10); + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + MockTransportService[] mockTransportServices = startTransport(numClusters, nodes, remoteIndicesByCluster, builder); + Settings settings = builder.build(); + boolean local = randomBoolean(); + OriginalIndices localIndices = local ? new OriginalIndices(new String[] { "index" }, SearchRequest.DEFAULT_INDICES_OPTIONS) : null; + int totalClusters = numClusters + (local ? 
1 : 0); + TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); + try ( + MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ) + ) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + int numDisconnectedClusters = randomIntBetween(1, numClusters); Set disconnectedNodes = Sets.newHashSetWithExpectedSize(numDisconnectedClusters); Set disconnectedNodesIndices = Sets.newHashSetWithExpectedSize(numDisconnectedClusters); @@ -687,8 +817,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -736,8 +869,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti if (localIndices != null) { clusterAliases.add(""); } + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, @@ -807,8 +943,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti if (localIndices != null) { clusterAliases.add(""); } + TaskId parentTaskId = new TaskId("n", 1); + SearchTask task = new SearchTask(2, "search", "search", () -> "desc", parentTaskId, Collections.emptyMap()); TransportSearchAction.ccsRemoteReduce( - new TaskId("n", 1), + task, + parentTaskId, searchRequest, localIndices, remoteIndicesByCluster, diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index 481f5c79ba2ed..c167d74eb78d2 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -499,6 +499,19 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna searchResponse.get().updatePartialResponse(shards.size(), totalHits, () -> aggregations, reducePhase); } + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. 
+ * @param clusterResponse SearchResponse from cluster 'clusterAlias' + */ + @Override + public void onClusterResponseMinimizeRoundtrips(String clusterAlias, SearchResponse clusterResponse) { + // no need to call the delegate progress listener, since this method is only called for minimize_roundtrips=true + searchResponse.get().updateResponseMinimizeRoundtrips(clusterAlias, clusterResponse); + } + @Override public void onResponse(SearchResponse response) { searchResponse.get().updateFinalResponse(response, ccsMinimizeRoundtrips); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index e50a4ce1ed94f..de360fd1c1bd4 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; +import org.elasticsearch.action.search.SearchResponseMerger; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -19,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; @@ -61,7 +63,20 @@ class MutableSearchResponse implements Releasable { private SearchResponse finalResponse; private ElasticsearchException failure; private Map> responseHeaders; - + /** + * Set to true when the local cluster has completed (its full SearchResponse + * has been received. Only used for CCS minimize_roundtrips=true. + */ + private boolean localClusterComplete; + /** + * For CCS minimize_roundtrips=true, we collect SearchResponses from each cluster in + * order to provide partial results before all clusters have reported back results. + */ + private List clusterResponses; + /** + * Set to true when the final SearchResponse has been received + * or a fatal error has occurred. + */ private boolean frozen; /** @@ -81,11 +96,16 @@ class MutableSearchResponse implements Releasable { this.isPartial = true; this.threadContext = threadContext; this.totalHits = EMPTY_TOTAL_HITS; + this.localClusterComplete = false; } /** * Updates the response with the result of a partial reduction. + * + * @param successfulShards + * @param totalHits * @param reducedAggs is a strategy for producing the reduced aggs + * @param reducePhase */ @SuppressWarnings("HiddenField") synchronized void updatePartialResponse( @@ -128,6 +148,24 @@ assert shardsInResponseMatchExpected(response, ccsMinimizeRoundtrips) this.frozen = true; } + /** + * Indicates that a cluster has finished a search operation. Used for CCS minimize_roundtrips=true only. + * + * @param clusterAlias alias of cluster that has finished a search operation and returned a SearchResponse. + * The cluster alias for the local cluster is RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY. 
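+     *                     (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY is the empty string, so any non-empty alias denotes a remote cluster.)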
+ * @param clusterResponse SearchResponse from cluster 'clusterAlias' + */ + synchronized void updateResponseMinimizeRoundtrips(String clusterAlias, SearchResponse clusterResponse) { + if (clusterResponses == null) { + clusterResponses = new ArrayList<>(); + } + clusterResponses.add(clusterResponse); + clusterResponse.mustIncRef(); + if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { + localClusterComplete = true; + } + } + private boolean isPartialResponse(SearchResponse response) { if (response.getClusters() == null) { return true; @@ -190,6 +228,7 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon if (restoreResponseHeaders && responseHeaders != null) { restoreResponseHeadersContext(threadContext, responseHeaders); } + SearchResponse searchResponse; if (finalResponse != null) { // We have a final response, use it. @@ -199,16 +238,43 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon // An error occurred before we got the shard list searchResponse = null; } else { - /* - * Build the response, reducing aggs if we haven't already and - * storing the result of the reduction, so we won't have to reduce - * the same aggregation results a second time if nothing has changed. - * This does cost memory because we have a reference to the finally - * reduced aggs sitting around which can't be GCed until we get an update. - */ - InternalAggregations reducedAggs = reducedAggsSource.get(); - reducedAggsSource = () -> reducedAggs; - searchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + // partial results branch + SearchResponseMerger searchResponseMerger = createSearchResponseMerger(task); + try { + if (searchResponseMerger == null) { // local-only search or CCS MRT=false + /* + * Build the response, reducing aggs if we haven't already and + * storing the result of the reduction, so we won't have to reduce + * the same aggregation results a second time if nothing has changed. + * This does cost memory because we have a reference to the finally + * reduced aggs sitting around which can't be GCed until we get an update. + */ + InternalAggregations reducedAggs = reducedAggsSource.get(); + reducedAggsSource = () -> reducedAggs; + searchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + } else if (localClusterComplete == false) { + /* + * For CCS MRT=true and the local cluster has reported back only partial results + * (subset of shards), so use SearchResponseMerger to do a merge of any full results that + * have come in from remote clusters and the partial results of the local cluster + */ + InternalAggregations reducedAggs = reducedAggsSource.get(); + reducedAggsSource = () -> reducedAggs; + SearchResponse partialAggsSearchResponse = buildResponse(task.getStartTimeNanos(), reducedAggs); + try { + searchResponse = getMergedResponse(searchResponseMerger, partialAggsSearchResponse); + } finally { + partialAggsSearchResponse.decRef(); + } + } else { + // For CCS MRT=true when the local cluster has reported back full results (via updateResponseMinimizeRoundtrips) + searchResponse = getMergedResponse(searchResponseMerger); + } + } finally { + if (searchResponseMerger != null) { + searchResponseMerger.close(); + } + } } try { return new AsyncSearchResponse( @@ -227,6 +293,41 @@ synchronized AsyncSearchResponse toAsyncSearchResponse(AsyncSearchTask task, lon } } + /** + * Creates a SearchResponseMerger from the Supplier of {@link SearchResponseMerger} held by the AsyncSearchTask. 
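+     * (The supplier is installed by TransportSearchAction#ccsRemoteReduce, and only for searches that run with
+     * ccs_minimize_roundtrips=true, as shown earlier in this patch.)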
+ * The supplier will be null for local-only searches and CCS minimize_roundtrips=false. In those cases, + * this method returns null. + * + * Otherwise, it creates a new SearchResponseMerger and populates it with all the SearchResponses + * received so far (via the updateResponseMinimizeRoundtrips method). + * + * @param task holds the Supplier of SearchResponseMerger + * @return SearchResponseMerger with all responses collected so far, or null + * (for local-only/CCS minimize_roundtrips=false) + */ + private SearchResponseMerger createSearchResponseMerger(AsyncSearchTask task) { + if (task.getSearchResponseMergerSupplier() == null) { + return null; // local search and CCS minimize_roundtrips=false + } + return task.getSearchResponseMergerSupplier().get(); + } + + private SearchResponse getMergedResponse(SearchResponseMerger merger) { + return getMergedResponse(merger, null); + } + + private SearchResponse getMergedResponse(SearchResponseMerger merger, SearchResponse localPartialAggsOnly) { + if (clusterResponses != null) { + for (SearchResponse response : clusterResponses) { + merger.add(response); + } + } + if (localPartialAggsOnly != null) { + merger.add(localPartialAggsOnly); + } + return merger.getMergedResponse(clusters); + } + /** * Creates an {@link AsyncStatusResponse} -- status of an async response. * Response is created based on the current state of the mutable response or based on {@code finalResponse} if it is available. @@ -373,9 +474,14 @@ private String getShardsInResponseMismatchInfo(SearchResponse response, boolean } @Override - public void close() { + public synchronized void close() { if (finalResponse != null) { finalResponse.decRef(); } + if (clusterResponses != null) { + for (SearchResponse clusterResponse : clusterResponses) { + clusterResponse.decRef(); + } + } } } From ca919d9f81b71d33b55916ee0c6ea546144dcd47 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 16 Jan 2024 15:06:44 +0100 Subject: [PATCH 46/95] Account for shard count in profiling test timeout (#104366) With this commit we increase the timeout to wait for a green cluster state in the integration tests for the profiling plugin. We do this because the tests create all profiling-related indices with more shards than usual, so it can take longer than usual until the cluster is green.
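In ESIntegTestCase terms the change is a switch from the default health-check overload to the explicit-timeout one; a minimal sketch ("my-index" is a stand-in for the real profiling indices, and the 30-second figure is the helper's default):

    ensureGreen("my-index"); // waits up to the default 30 seconds for a green cluster state
    ensureGreen(TimeValue.timeValueSeconds(120), "my-index"); // the overload this commit switches to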
Closes #103809 --- .../org/elasticsearch/xpack/profiling/CancellationIT.java | 2 -- .../elasticsearch/xpack/profiling/GetFlameGraphActionIT.java | 3 --- .../elasticsearch/xpack/profiling/GetStackTracesActionIT.java | 2 -- .../org/elasticsearch/xpack/profiling/ProfilingTestCase.java | 4 +++- 4 files changed, 3 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java index fa28877f5b4c1..ef5198499ff09 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java @@ -10,7 +10,6 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.logging.log4j.LogManager; -import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -49,7 +48,6 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class CancellationIT extends ProfilingTestCase { @Override protected Collection> nodePlugins() { diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index e0e4ef2a12985..8553574d39646 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -7,9 +7,6 @@ package org.elasticsearch.xpack.profiling; -import org.apache.lucene.tests.util.LuceneTestCase; - -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetFlameGraphActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 9c60a6bcdfc1c..098023ad1841a 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,13 +7,11 @@ package org.elasticsearch.xpack.profiling; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import java.util.List; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new 
GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 82d6f6193505d..6424c0f3ae259 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; @@ -127,7 +128,8 @@ protected final void doSetupData() throws Exception { ); allIndices.add(apmTestIndex); waitForIndices(allIndices); - ensureGreen(allIndices.toArray(new String[0])); + // higher timeout since we have more shards than usual + ensureGreen(TimeValue.timeValueSeconds(120), allIndices.toArray(new String[0])); bulkIndex("data/profiling-events-all.ndjson"); bulkIndex("data/profiling-stacktraces.ndjson"); From 1f44cc5df9142fe3cd795d65947dbd2b5a487439 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 16 Jan 2024 16:50:36 +0200 Subject: [PATCH 47/95] Add comma escape character (#104274) * Add escape character for comma in CsvTestsDataLoader * Handle escaped commas in .csv-spec values as well * Change the way the indices are populated when running the loader from command line --- .../xpack/esql/CsvTestUtils.java | 49 +++++++++++--- .../xpack/esql/CsvTestsDataLoader.java | 66 +++++++++++++++---- 2 files changed, 93 insertions(+), 22 deletions(-) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index d49d5a964e944..08d837a9d802d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -61,6 +61,9 @@ public final class CsvTestUtils { private static final int MAX_WIDTH = 20; private static final CsvPreference CSV_SPEC_PREFERENCES = new CsvPreference.Builder('"', '|', "\r\n").build(); private static final String NULL_VALUE = "null"; + private static final char ESCAPE_CHAR = '\\'; + public static final String COMMA_ESCAPING_REGEX = "(?<!\\\\),"; // matches a "," only when not preceded by the \ escape character + public static final String ESCAPED_COMMA_SEQUENCE = "\\,"; @@ -237,14 +242,20 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) int pos = 0; // current position in the csv String int commaPos; // current "," character position + int previousCommaPos = 0; while ((commaPos = csvLine.indexOf(",", pos)) != -1 || pos <= csvLine.length()) { + if (commaPos > 0 && csvLine.charAt(commaPos - 1) == ESCAPE_CHAR) {// skip the escaped comma + pos = commaPos + 1;// moving on to the next character after comma + continue; + } + boolean isLastElement = commaPos == -1; - String entry = csvLine.substring(pos, isLastElement ? csvLine.length() : commaPos).trim(); + String entry = csvLine.substring(previousCommaPos, isLastElement ?
csvLine.length() : commaPos).trim(); if (entry.startsWith("[")) { if (previousMvValue != null || (isLastElement && entry.endsWith("]") == false)) { String message = "Error line [{}:{}]: Unexpected start of a multi-value field value; current token [{}], " + (isLastElement ? "no closing point" : "previous token [{}]"); - throw new IllegalArgumentException(format(message, lineNumber, pos, entry, previousMvValue)); + throw new IllegalArgumentException(format(message, lineNumber, previousCommaPos, entry, previousMvValue)); } if (entry.endsWith("]")) { if (entry.length() > 2) {// single-valued multivalue field :shrug: @@ -263,7 +274,7 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) format( "Error line [{}:{}]: Unexpected end of a multi-value field value (no previous starting point); found [{}]", lineNumber, - pos, + previousCommaPos, entry ) ); @@ -279,8 +290,8 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) format( "Error line [{}:{}]: Unexpected missing value in a multi-value column; found [{}]", lineNumber, - pos, - csvLine.substring(pos - 1) + previousCommaPos, + csvLine.substring(previousCommaPos - 1) ) ); } @@ -290,12 +301,22 @@ static String[] multiValuesAwareCsvToStringArray(String csvLine, int lineNumber) } } pos = 1 + (isLastElement ? csvLine.length() : commaPos);// break out of the loop if it reached its last element + previousCommaPos = pos; } return mvCompressedEntries.toArray(String[]::new); } public record ExpectedResults(List<String> columnNames, List<Type> columnTypes, List<List<Object>> values) {} + /** + * The method loads a section of a .csv-spec file representing the results of executing the query of that section. + * It reads both the schema (field names and their types) and the row values. + * Values starting with an opening square bracket and ending with a closing square bracket are considered multi-values. Inside + * these multi-values, commas separate the individual values and escaped commas are allowed with a prefixed \ (backslash) character.
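+     * For example, a result cell written as [dev\, ops,qa] is read as the two-element multi-value {"dev, ops", "qa"}:
+     * the unescaped comma separates the values, while the escaped one is kept (unescaped) inside the first value.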
+ * @param csv a string representing the header and row values of a single query execution result + * @return data structure with column names, their types and values + */ public static ExpectedResults loadCsvSpecValues(String csv) { List columnNames; List columnTypes; @@ -338,13 +359,21 @@ public static ExpectedResults loadCsvSpecValues(String csv) { if (value.startsWith("[") ^ value.endsWith("]")) { throw new IllegalArgumentException("Incomplete multi-value (opening and closing square brackets) found " + value); } - if (value.contains(",") && value.startsWith("[")) {// commas outside a multi-value should be ok - List listOfMvValues = new ArrayList<>(); - for (String mvValue : delimitedListToStringArray(value.substring(1, value.length() - 1), ",")) { - listOfMvValues.add(columnTypes.get(i).convert(mvValue.trim())); + if (value.contains(",") && value.startsWith("[")) { + // split on commas but ignoring escaped commas + String[] multiValues = value.substring(1, value.length() - 1).split(COMMA_ESCAPING_REGEX); + if (multiValues.length > 0) { + List listOfMvValues = new ArrayList<>(); + for (String mvValue : multiValues) { + listOfMvValues.add(columnTypes.get(i).convert(mvValue.trim().replace(ESCAPED_COMMA_SEQUENCE, ","))); + } + rowValues.add(listOfMvValues); + } else { + rowValues.add(columnTypes.get(i).convert(value.replace(ESCAPED_COMMA_SEQUENCE, ","))); } - rowValues.add(listOfMvValues); } else { + // The value considered here is the one where any potential escaped comma is kept as is (with the escape char) + // TODO if we'd want escaped commas outside multi-values fields, we'd have to adjust this value here as well rowValues.add(columnTypes.get(i).convert(value)); } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 3df70b3b83d37..8641c2511b199 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -42,8 +42,9 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.common.Strings.delimitedListToStringArray; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.esql.CsvTestUtils.COMMA_ESCAPING_REGEX; +import static org.elasticsearch.xpack.esql.CsvTestUtils.ESCAPED_COMMA_SEQUENCE; import static org.elasticsearch.xpack.esql.CsvTestUtils.multiValuesAwareCsvToStringArray; public class CsvTestsDataLoader { @@ -137,17 +138,33 @@ public static void main(String[] args) throws IOException { } try (RestClient client = builder.build()) { - loadDataSetIntoEs(client); + loadDataSetIntoEs(client, (restClient, indexName, indexMapping) -> { + Request request = new Request("PUT", "/" + indexName); + request.setJsonEntity("{\"mappings\":" + indexMapping + "}"); + restClient.performRequest(request); + }); } } + private static void loadDataSetIntoEs(RestClient client, IndexCreator indexCreator) throws IOException { + loadDataSetIntoEs(client, LogManager.getLogger(CsvTestsDataLoader.class), indexCreator); + } + public static void loadDataSetIntoEs(RestClient client) throws IOException { - loadDataSetIntoEs(client, LogManager.getLogger(CsvTestsDataLoader.class)); + loadDataSetIntoEs(client, (restClient, indexName, indexMapping) -> { + ESRestTestCase.createIndex(restClient, indexName, 
null, indexMapping, null); + }); } public static void loadDataSetIntoEs(RestClient client, Logger logger) throws IOException { + loadDataSetIntoEs(client, logger, (restClient, indexName, indexMapping) -> { + ESRestTestCase.createIndex(restClient, indexName, null, indexMapping, null); + }); + } + + private static void loadDataSetIntoEs(RestClient client, Logger logger, IndexCreator indexCreator) throws IOException { for (var dataSet : CSV_DATASET_MAP.values()) { - load(client, dataSet.indexName, "/" + dataSet.mappingFileName, "/" + dataSet.dataFileName, logger); + load(client, dataSet.indexName, "/" + dataSet.mappingFileName, "/" + dataSet.dataFileName, logger, indexCreator); } forceMerge(client, CSV_DATASET_MAP.keySet(), logger); for (var policy : ENRICH_POLICIES) { @@ -169,7 +186,14 @@ private static void loadEnrichPolicy(RestClient client, String policyName, Strin client.performRequest(request); } - private static void load(RestClient client, String indexName, String mappingName, String dataName, Logger logger) throws IOException { + private static void load( + RestClient client, + String indexName, + String mappingName, + String dataName, + Logger logger, + IndexCreator indexCreator + ) throws IOException { URL mapping = CsvTestsDataLoader.class.getResource(mappingName); if (mapping == null) { throw new IllegalArgumentException("Cannot find resource " + mappingName); @@ -178,14 +202,10 @@ private static void load(RestClient client, String indexName, String mappingName if (data == null) { throw new IllegalArgumentException("Cannot find resource " + dataName); } - createTestIndex(client, indexName, readTextFile(mapping)); + indexCreator.createIndex(client, indexName, readTextFile(mapping)); loadCsvData(client, indexName, data, CsvTestsDataLoader::createParser, logger); } - private static void createTestIndex(RestClient client, String indexName, String mapping) throws IOException { - ESRestTestCase.createIndex(client, indexName, null, mapping, null); - } - public static String readTextFile(URL resource) throws IOException { try (BufferedReader reader = TestUtils.reader(resource)) { StringBuilder b = new StringBuilder(); @@ -198,6 +218,20 @@ public static String readTextFile(URL resource) throws IOException { } @SuppressWarnings("unchecked") + /** + * Loads a classic csv file in an ES cluster using a RestClient. 
+ * The structure of the file is as follows: + * - commented lines should start with "//" + * - the first non-comment line from the file is the schema line (comma separated field_name:ES_data_type elements) + * - sub-fields should be placed after the root field using a dot notation for the name: + * root_field:long,root_field.sub_field:integer + * - a special _id field can be used in the schema and the values of this field will be used in the bulk request as actual doc ids + * - all subsequent non-comment lines represent the values that will be used to build the _bulk request + * - an empty string "" refers to a null value + * - a value starting with an opening square bracket "[" and ending with a closing square bracket "]" refers to a multi-value field + * - multi-values are comma separated + * - commas inside multivalue fields can be escaped with \ (backslash) character + */ private static void loadCsvData( RestClient client, String indexName, @@ -278,9 +312,11 @@ if (i > 0 && row.length() > 0) { row.append(","); } - if (entries[i].contains(",")) {// multi-value + // split on comma ignoring escaped commas + String[] multiValues = entries[i].split(COMMA_ESCAPING_REGEX); + if (multiValues.length > 1) {// multi-value StringBuilder rowStringValue = new StringBuilder("["); - for (String s : delimitedListToStringArray(entries[i], ",")) { + for (String s : multiValues) { rowStringValue.append("\"" + s + "\","); } // remove the last comma and put a closing bracket instead @@ -289,6 +325,8 @@ } else { entries[i] = "\"" + entries[i] + "\""; } + // replace any escaped commas with single comma + entries[i] = entries[i].replace(ESCAPED_COMMA_SEQUENCE, ","); row.append("\"" + columns[i] + "\":" + entries[i]); } catch (Exception e) { throw new IllegalArgumentException( @@ -356,4 +394,8 @@ private static XContentParser createParser(XContent xContent, InputStream data) public record TestsDataset(String indexName, String mappingFileName, String dataFileName) {} public record EnrichConfig(String policyName, String policyFileName) {} + + private interface IndexCreator { + void createIndex(RestClient client, String indexName, String mapping) throws IOException; + } } From 932208ce19700321b39ae6c5a91956898242389b Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Tue, 16 Jan 2024 16:22:52 +0100 Subject: [PATCH 48/95] Add test coverage for permission behaviour in SymbolicLinkPreservingTar (#104398) --- .../SymbolicLinkPreservingTarFuncTest.groovy | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy index 144307912101c..beac84876ed71 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy @@ -36,6 +36,11 @@ class SymbolicLinkPreservingTarFuncTest extends AbstractGradleFuncTest { final Path linkToRealFolder = archiveSourceRoot.resolve("link-to-real-folder"); Files.createSymbolicLink(linkToRealFolder, Paths.get("./real-folder")); + final Path realFolder2 = testProjectDir.getRoot().toPath().resolve("real-folder2") + final Path realFolderSub = realFolder2.resolve("sub")
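+        // real-folder2 and its sub directory are copied into the archive's config dir below to cover dirMode/fileMode
+        // handling; the tar entry assertions later use decimal modes: 16877 == octal 040755 (the 040000 directory
+        // bit plus the default perms 0755) and 16872 == octal 040750 (the directory bit plus the dirMode 0750 set below)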
Files.createDirectory(realFolder2); + Files.createDirectory(realFolderSub); + buildFile << """ import org.elasticsearch.gradle.internal.SymbolicLinkPreservingTar @@ -56,6 +61,12 @@ tasks.register("buildBZip2Tar", SymbolicLinkPreservingTar) { SymbolicLinkPreserv tar.compression = Compression.BZIP2 tar.preserveFileTimestamps = ${preserverTimestamp} from fileTree("archiveRoot") + + into('config') { + dirMode 0750 + fileMode 0660 + from "real-folder2" + } } """ when: @@ -117,15 +128,22 @@ tasks.register("buildTar", SymbolicLinkPreservingTar) { SymbolicLinkPreservingTa while (entry != null) { if (entry.getName().equals("real-folder/")) { assert entry.isDirectory() + assert entry.getMode() == 16877 realFolderEntry = true - } else if (entry.getName().equals("real-folder/file")) { + } else if (entry.getName().equals("real-folder/file")) { assert entry.isFile() fileEntry = true } else if (entry.getName().equals("real-folder/link-to-file")) { assert entry.isSymbolicLink() assert normalized(entry.getLinkName()) == "./file" linkToFileEntry = true - } else if (entry.getName().equals("link-in-folder/")) { + } else if (entry.getName().equals("config/")) { + assert entry.isDirectory() + assert entry.getMode() == 16877 + } else if (entry.getName().equals("config/sub/")) { + assert entry.isDirectory() + assert entry.getMode() == 16872 + }else if (entry.getName().equals("link-in-folder/")) { assert entry.isDirectory() linkInFolderEntry = true } else if (entry.getName().equals("link-in-folder/link-to-file")) { From af50962ec312cef2d855977924d0821879936f50 Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Tue, 16 Jan 2024 10:54:17 -0500 Subject: [PATCH 49/95] Fix CoreFullClusterRestartIT testRollover (#104373) --- .../elasticsearch/upgrades/FullClusterRestartIT.java | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 16209a73826ca..99b40b0f5c101 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -605,7 +605,7 @@ public void testShrinkAfterUpgrade() throws IOException { *
  • Make sure the document count is correct * */ - public void testRollover() throws IOException { + public void testRollover() throws Exception { if (isRunningAgainstOldCluster()) { client().performRequest( newXContentRequest( @@ -637,9 +637,12 @@ public void testRollover() throws IOException { ) ); - assertThat( - EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), - containsString("testrollover-000002") + // assertBusy to work around https://github.com/elastic/elasticsearch/issues/104371 + assertBusy( + () -> assertThat( + EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v&error_trace")).getEntity()), + containsString("testrollover-000002") + ) ); } From 51caf171bc2cf7bedeefdb7fcc8c2e400d4662e3 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Jan 2024 16:56:29 +0100 Subject: [PATCH 50/95] Remove redudant children of BroadcastResponse (#104410) A couple of children of `BroadCastResponse` are completely redundant, adding no extra fields or separate serialization. Removed them and replaced their use by the broadcast response itself. --- .../DataStreamLifecycleServiceIT.java | 4 +- .../lifecycle/DataStreamLifecycleService.java | 4 +- .../DataStreamLifecycleServiceTests.java | 12 ++-- .../ingest/geoip/GeoIpDownloaderTests.java | 48 +++++++++------ .../AbstractAsyncBulkByScrollAction.java | 6 +- .../s3/S3BlobStoreRepositoryTests.java | 6 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 +- .../clear/ClearIndicesCacheBlocksIT.java | 3 +- .../admin/indices/flush/FlushBlocksIT.java | 3 +- .../forcemerge/ForceMergeBlocksIT.java | 5 +- .../indices/forcemerge/ForceMergeIT.java | 6 +- .../indices/refresh/RefreshBlocksIT.java | 3 +- .../cluster/routing/ShardRoutingRoleIT.java | 4 +- .../document/DocumentActionsIT.java | 16 +++-- .../org/elasticsearch/get/GetActionIT.java | 4 +- .../indices/IndexingMemoryControllerIT.java | 4 +- .../indices/IndicesRequestCacheIT.java | 14 ++--- .../elasticsearch/indices/flush/FlushIT.java | 6 +- .../mapping/UpdateMappingIntegrationIT.java | 4 +- .../RandomExceptionCircuitBreakerIT.java | 4 +- .../indices/stats/IndexStatsIT.java | 4 +- .../recovery/RecoveryWhileUnderLoadIT.java | 4 +- .../recovery/SimpleRecoveryIT.java | 7 +-- .../basic/SearchWhileCreatingIndexIT.java | 4 +- .../basic/SearchWithRandomExceptionsIT.java | 4 +- .../basic/SearchWithRandomIOExceptionsIT.java | 4 +- .../basic/TransportSearchFailuresIT.java | 4 +- .../suggest/CompletionSuggestSearchIT.java | 4 +- .../snapshots/BlobStoreIncrementalityIT.java | 4 +- .../SharedClusterSnapshotRestoreIT.java | 4 +- .../cache/clear/ClearIndicesCacheAction.java | 5 +- .../ClearIndicesCacheRequestBuilder.java | 3 +- .../clear/ClearIndicesCacheResponse.java | 61 ------------------- .../TransportClearIndicesCacheAction.java | 7 ++- .../admin/indices/flush/FlushAction.java | 5 +- .../admin/indices/flush/FlushRequest.java | 1 - .../indices/flush/FlushRequestBuilder.java | 3 +- .../admin/indices/flush/FlushResponse.java | 52 ---------------- .../indices/flush/TransportFlushAction.java | 7 ++- .../indices/forcemerge/ForceMergeAction.java | 5 +- .../indices/forcemerge/ForceMergeRequest.java | 1 - .../forcemerge/ForceMergeRequestBuilder.java | 3 +- .../forcemerge/ForceMergeResponse.java | 61 ------------------- .../forcemerge/TransportForceMergeAction.java | 7 ++- .../admin/indices/refresh/RefreshAction.java | 5 +- .../admin/indices/refresh/RefreshRequest.java | 1 - .../refresh/RefreshRequestBuilder.java | 3 +- 
.../indices/refresh/RefreshResponse.java | 57 ----------------- .../refresh/TransportRefreshAction.java | 7 ++- .../broadcast/BaseBroadcastResponse.java | 2 +- .../client/internal/IndicesAdminClient.java | 21 +++---- .../internal/support/AbstractClient.java | 21 +++---- .../admin/indices/RestForceMergeAction.java | 4 +- .../admin/indices/RestSyncedFlushAction.java | 8 +-- .../clear/ClearIndicesCacheResponseTests.java | 33 ---------- .../indices/flush/FlushResponseTests.java | 33 ---------- .../forcemerge/ForceMergeResponseTests.java | 32 ---------- .../indices/refresh/RefreshResponseTests.java | 33 ---------- .../BroadcastReplicationTests.java | 6 +- .../FieldStatsProviderRefreshTests.java | 4 +- .../AbstractIndexRecoveryIntegTestCase.java | 4 +- ...ESMockAPIBasedRepositoryIntegTestCase.java | 6 +- .../elasticsearch/test/ESIntegTestCase.java | 16 +++-- .../test/rest/ESRestTestCase.java | 28 +++++++-- .../elasticsearch/xpack/CcrIntegTestCase.java | 6 +- .../xpack/core/ilm/ForceMergeStepTests.java | 14 ++--- .../downsample/TransportDownsampleAction.java | 6 +- .../downsample/DownsampleDataStreamTests.java | 4 +- .../xpack/graph/test/GraphTests.java | 4 +- .../ml/integration/MlNativeIntegTestCase.java | 4 +- .../integration/TrainedModelProviderIT.java | 6 +- .../process/ChunkedTrainedModelPersister.java | 6 +- .../steps/AbstractDataFrameAnalyticsStep.java | 4 +- .../ml/dataframe/steps/AnalysisStep.java | 4 +- .../xpack/ml/dataframe/steps/FinalStep.java | 6 +- .../ml/dataframe/steps/InferenceStep.java | 4 +- .../persistence/TrainedModelProvider.java | 4 +- .../ml/job/persistence/JobDataDeleter.java | 4 +- .../ChunkedTrainedModelPersisterTests.java | 4 +- .../xpack/rollup/job/RollupJobTask.java | 6 +- .../xpack/rollup/job/RollupJobTaskTests.java | 8 +-- .../SearchableSnapshotsLicenseIntegTests.java | 6 +- ...ableSnapshotsBlobStoreCacheIntegTests.java | 7 +-- ...tsBlobStoreCacheMaintenanceIntegTests.java | 4 +- .../shared/NodesCachesStatsIntegTests.java | 4 +- .../ClearSearchableSnapshotsCacheAction.java | 5 +- ...ClearSearchableSnapshotsCacheResponse.java | 30 --------- ...rtClearSearchableSnapshotsCacheAction.java | 7 ++- .../integration/DlsFlsRequestCacheTests.java | 7 +-- .../security/authc/ApiKeyIntegTests.java | 4 +- ...sportSamlInvalidateSessionActionTests.java | 4 +- .../IndexBasedTransformConfigManager.java | 4 +- .../test/integration/SingleNodeTests.java | 4 +- .../xpack/watcher/WatcherService.java | 4 +- .../xpack/watcher/WatcherServiceTests.java | 6 +- .../execution/TriggeredWatchStoreTests.java | 10 +-- 96 files changed, 293 insertions(+), 655 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java delete mode 100644 
x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index f34bb96b3eb81..9880e5e9914a8 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -30,6 +29,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; import org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; @@ -313,7 +313,7 @@ public void testAutomaticForceMerge() throws Exception { for (int i = 0; i < randomIntBetween(10, 50); i++) { indexDocs(dataStreamName, randomIntBetween(1, 300)); // Make sure the segments get written: - FlushResponse flushResponse = indicesAdmin().flush(new FlushRequest(toBeRolledOverIndex)).actionGet(); + BroadcastResponse flushResponse = indicesAdmin().flush(new FlushRequest(toBeRolledOverIndex)).actionGet(); assertThat(flushResponse.getStatus(), equalTo(RestStatus.OK)); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index eff40cb1dbe62..8b15d6a4b7bdf 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; @@ -33,6 +32,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import 
org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -1168,7 +1168,7 @@ private void forceMergeIndex(ForceMergeRequest forceMergeRequest, ActionListener logger.info("Data stream lifecycle is issuing a request to force merge index [{}]", targetIndex); client.admin().indices().forceMerge(forceMergeRequest, new ActionListener<>() { @Override - public void onResponse(ForceMergeResponse forceMergeResponse) { + public void onResponse(BroadcastResponse forceMergeResponse) { if (forceMergeResponse.getFailedShards() > 0) { DefaultShardOperationFailedException[] failures = forceMergeResponse.getShardFailures(); String message = Strings.format( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index befa16573de23..15f526d0a06d6 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequest; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; @@ -27,6 +26,7 @@ import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -578,7 +578,7 @@ public void testForceMerge() throws Exception { // We want this test method to get fake force merge responses, because this is what triggers a cluster state update clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); @@ -748,7 +748,7 @@ public void testForceMergeRetries() throws Exception { clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { listener.onResponse( - new ForceMergeResponse( + new BroadcastResponse( 5, 5, 1, @@ -779,7 +779,7 @@ public void testForceMergeRetries() throws Exception { AtomicInteger forceMergeFailedCount = new AtomicInteger(0); clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 4, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 4, 0, List.of())); forceMergeFailedCount.incrementAndGet(); } }; @@ -800,7 +800,7 @@ public void testForceMergeRetries() throws Exception { // For the final data stream lifecycle run, we let forcemerge run normally 
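// Editor's sketch, not part of the original patch: with ForceMergeResponse removed, these test stubs answer
// with the shared broadcast shape directly. Assuming the four-argument constructor used throughout this
// change, BroadcastResponse(totalShards, successfulShards, failedShards, shardFailures), a fully successful
// force-merge over five shards is modelled as
//
//     listener.onResponse(new BroadcastResponse(5, 5, 0, List.of()));
//
// while the (5, 4, 0, List.of()) reply above reports one of five shards as never having succeeded, with no
// explicit shard failure attached; the lifecycle service under test treats such an incomplete response as
// grounds for a retry, which is what the test's invocation counter is measuring.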
clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; dataStreamLifecycleService.run(clusterService.state()); @@ -900,7 +900,7 @@ public void testForceMergeDedup() throws Exception { setState(clusterService, state); clientDelegate = (action, request, listener) -> { if (action.name().equals("indices:admin/forcemerge")) { - listener.onResponse(new ForceMergeResponse(5, 5, 0, List.of())); + listener.onResponse(new BroadcastResponse(5, 5, 0, List.of())); } }; for (int i = 0; i < 100; i++) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 7fdce03252687..baf3006378054 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -17,13 +17,12 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -178,28 +177,34 @@ public int read() throws IOException { } public void testIndexChunksNoData() throws IOException { - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); InputStream empty = new ByteArrayInputStream(new byte[0]); assertEquals(0, geoIpDownloader.indexChunks("test", empty, 0, "d41d8cd98f00b204e9800998ecf8427e", 0)); } public void testIndexChunksMd5Mismatch() { - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, 
request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); IOException exception = expectThrows( IOException.class, @@ -232,14 +237,17 @@ public void testIndexChunks() throws IOException { assertEquals(chunk + 15, source.get("chunk")); listener.onResponse(mock(IndexResponse.class)); }); - client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { - assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(FlushResponse.class)); - }); - client.addHandler(RefreshAction.INSTANCE, (RefreshRequest request, ActionListener flushResponseActionListener) -> { + client.addHandler(FlushAction.INSTANCE, (FlushRequest request, ActionListener flushResponseActionListener) -> { assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); - flushResponseActionListener.onResponse(mock(RefreshResponse.class)); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); }); + client.addHandler( + RefreshAction.INSTANCE, + (RefreshRequest request, ActionListener flushResponseActionListener) -> { + assertArrayEquals(new String[] { GeoIpDownloader.DATABASES_INDEX }, request.indices()); + flushResponseActionListener.onResponse(mock(BroadcastResponse.class)); + } + ); InputStream big = new ByteArrayInputStream(bigArray); assertEquals(17, geoIpDownloader.indexChunks("test", big, 15, "a67563dfa8f3cba8b8cff61eb989a749", 0)); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java index fcea4618f4cd4..609702a58bf84 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/AbstractAsyncBulkByScrollAction.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; @@ -24,6 +23,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.unit.ByteSizeValue; import 
org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -554,9 +554,9 @@ void refreshAndFinish(List indexingFailures, List search RefreshRequest refresh = new RefreshRequest(); refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()])); logger.debug("[{}]: refreshing", task.getId()); - bulkClient.admin().indices().refresh(refresh, new ActionListener() { + bulkClient.admin().indices().refresh(refresh, new ActionListener<>() { @Override - public void onResponse(RefreshResponse response) { + public void onResponse(BroadcastResponse response) { finishHim(null, indexingFailures, searchFailures, timedOut); } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index e70151cbdf8ee..9ad2c57b7f585 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -15,8 +15,8 @@ import com.sun.net.httpserver.HttpHandler; import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.blobstore.BlobContainer; @@ -191,7 +191,7 @@ public void testAbortRequestStats() throws Exception { waitForDocs(nbDocs, indexer); } flushAndRefresh(index); - ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); @@ -234,7 +234,7 @@ public void testMetrics() throws Exception { waitForDocs(nbDocs, indexer); } flushAndRefresh(index); - ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get(); assertThat(forceMerge.getSuccessfulShards(), equalTo(1)); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 9c5415f1d5ea9..ca9528005758a 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -17,8 +17,8 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.aggregations.pipeline.DerivativePipelineAggregationBuilder; import 
org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -199,7 +199,7 @@ private void indexDocuments(String idPrefix) throws IOException, InterruptedExce assertTrue(latch.await(30, TimeUnit.SECONDS)); - RefreshResponse refreshResponse = refresh(INDEX_NAME); + BroadcastResponse refreshResponse = refresh(INDEX_NAME); ElasticsearchAssertions.assertNoFailures(refreshResponse); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java index 4d37f75894d56..e0805148a47e3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.cache.clear; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -33,7 +34,7 @@ public void testClearIndicesCacheWithBlocks() { for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", blockSetting); - ClearIndicesCacheResponse clearIndicesCacheResponse = indicesAdmin().prepareClearCache("test") + BroadcastResponse clearIndicesCacheResponse = indicesAdmin().prepareClearCache("test") .setFieldDataCache(true) .setQueryCache(true) .setFieldDataCache(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java index 69d4f7aaef329..4e2fade87196f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -44,7 +45,7 @@ public void testFlushWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - FlushResponse response = indicesAdmin().prepareFlush("test").get(); + BroadcastResponse response = indicesAdmin().prepareFlush("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java index a3474afc96c51..b5d8ef0308b91 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ 
-50,7 +51,7 @@ public void testForceMergeWithBlocks() { for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test", blockSetting); - ForceMergeResponse response = indicesAdmin().prepareForceMerge("test").get(); + BaseBroadcastResponse response = indicesAdmin().prepareForceMerge("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { @@ -70,7 +71,7 @@ public void testForceMergeWithBlocks() { // Merging all indices is blocked when the cluster is read-only try { - ForceMergeResponse response = indicesAdmin().prepareForceMerge().get(); + BaseBroadcastResponse response = indicesAdmin().prepareForceMerge().get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java index 229558e9f4242..22bc37b2fb946 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -51,13 +51,13 @@ public void testForceMergeUUIDConsistent() throws IOException { assertThat(getForceMergeUUID(primary), nullValue()); assertThat(getForceMergeUUID(replica), nullValue()); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(index).setMaxNumSegments(1).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(index).setMaxNumSegments(1).get(); assertThat(forceMergeResponse.getFailedShards(), is(0)); assertThat(forceMergeResponse.getSuccessfulShards(), is(2)); // Force flush to force a new commit that contains the force flush UUID - final FlushResponse flushResponse = indicesAdmin().prepareFlush(index).setForce(true).get(); + final BroadcastResponse flushResponse = indicesAdmin().prepareFlush(index).setForce(true).get(); assertThat(flushResponse.getFailedShards(), is(0)); assertThat(flushResponse.getSuccessfulShards(), is(2)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java index 41abfc1219199..2067038e0fdd2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; @@ -39,7 +40,7 @@ public void testRefreshWithBlocks() { )) { try { enableIndexBlock("test", blockSetting); - RefreshResponse response = 
indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse response = indicesAdmin().prepareRefresh("test").get(); assertNoFailures(response); assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index 8843e7ff39bc6..895a60133251f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -12,12 +12,12 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.search.ClosePointInTimeRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.TransportClosePointInTimeAction; import org.elasticsearch.action.search.TransportOpenPointInTimeAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -699,7 +699,7 @@ public void testRefreshFailsIfUnpromotableDisconnects() throws Exception { }); } - RefreshResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get(); + BroadcastResponse response = indicesAdmin().prepareRefresh(INDEX_NAME).get(); assertThat( "each unpromotable replica shard should be added to the shard failures", response.getFailedShards(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java index d3001f485846e..709f6b866ba28 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/document/DocumentActionsIT.java @@ -10,17 +10,15 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESIntegTestCase; @@ -63,7 +61,7 @@ public void testIndexActions() throws Exception { 
assertThat(indexResponse.getIndex(), equalTo(getConcreteIndexName())); assertThat(indexResponse.getId(), equalTo("1")); logger.info("Refreshing"); - RefreshResponse refreshResponse = refresh(); + BroadcastResponse refreshResponse = refresh(); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); logger.info("--> index exists?"); @@ -72,7 +70,7 @@ public void testIndexActions() throws Exception { assertThat(indexExists("test1234565"), equalTo(false)); logger.info("Clearing cache"); - ClearIndicesCacheResponse clearIndicesCacheResponse = indicesAdmin().clearCache( + BroadcastResponse clearIndicesCacheResponse = indicesAdmin().clearCache( new ClearIndicesCacheRequest("test").fieldDataCache(true).queryCache(true) ).actionGet(); assertNoFailures(clearIndicesCacheResponse); @@ -80,7 +78,7 @@ public void testIndexActions() throws Exception { logger.info("Force Merging"); waitForRelocation(ClusterHealthStatus.GREEN); - ForceMergeResponse mergeResponse = forceMerge(); + BaseBroadcastResponse mergeResponse = forceMerge(); assertThat(mergeResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); GetResponse getResult; @@ -130,7 +128,7 @@ public void testIndexActions() throws Exception { client().index(new IndexRequest("test").id("2").source(source("2", "test2"))).actionGet(); logger.info("Flushing"); - FlushResponse flushResult = indicesAdmin().prepareFlush("test").get(); + BroadcastResponse flushResult = indicesAdmin().prepareFlush("test").get(); assertThat(flushResult.getSuccessfulShards(), equalTo(numShards.totalNumShards)); assertThat(flushResult.getFailedShards(), equalTo(0)); logger.info("Refreshing"); @@ -220,7 +218,7 @@ public void testBulk() throws Exception { assertThat(bulkResponse.getItems()[5].getIndex(), equalTo(getConcreteIndexName())); waitForRelocation(ClusterHealthStatus.GREEN); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); assertNoFailures(refreshResponse); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index d4fe2fcb4d4c1..c9809574002c8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequestBuilder; import org.elasticsearch.action.get.GetResponse; @@ -18,6 +17,7 @@ import org.elasticsearch.action.get.MultiGetRequestBuilder; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; @@ -641,7 +641,7 @@ public void testGetFieldsComplexField() throws Exception { ensureGreen(); logger.info("flushing"); - FlushResponse flushResponse = indicesAdmin().prepareFlush("my-index").setForce(true).get(); + BroadcastResponse flushResponse = 
indicesAdmin().prepareFlush("my-index").setForce(true).get(); if (flushResponse.getSuccessfulShards() == 0) { StringBuilder sb = new StringBuilder("failed to flush at least one shard. total shards [").append( flushResponse.getTotalShards() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index 3dd9feff9ce25..1c715beb04356 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.indices; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; +import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; @@ -99,7 +99,7 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { prepareIndex("index").setId(Integer.toString(i)).setSource("field", "value").get(); } // Force merge so we know all merges are done before we start deleting: - ForceMergeResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); + BaseBroadcastResponse r = client().admin().indices().prepareForceMerge().setMaxNumSegments(1).get(); assertNoFailures(r); final RefreshStats refreshStats = shard.refreshStats(); for (int i = 0; i < 100; i++) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 0b99e3ba3ffcf..62e6cb59994b2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.indices; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -132,7 +132,7 @@ public void testQueryRewrite() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -202,7 +202,7 @@ public void testQueryRewriteMissingValues() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); 
ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -269,7 +269,7 @@ public void testQueryRewriteDates() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -343,7 +343,7 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertCacheState(client, "index-3", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = client.admin() + BroadcastResponse forceMergeResponse = client.admin() .indices() .prepareForceMerge("index-1", "index-2", "index-3") .setFlush(true) @@ -424,7 +424,7 @@ public void testCanCache() throws Exception { assertCacheState(client, "index", 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); ensureSearchable("index"); @@ -529,7 +529,7 @@ public void testCacheWithFilteredAlias() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); client.prepareIndex("index").setId("1").setRouting("1").setSource("created_at", DateTimeFormatter.ISO_LOCAL_DATE.format(now)).get(); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index a328148180107..17b18bf9af1ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -9,8 +9,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -54,9 +54,9 @@ public void testWaitIfOngoing() throws InterruptedException { final CountDownLatch latch = new CountDownLatch(10); final CopyOnWriteArrayList errors = new CopyOnWriteArrayList<>(); for (int j = 0; j < 10; j++) { - indicesAdmin().prepareFlush("test").execute(new ActionListener() { + 
indicesAdmin().prepareFlush("test").execute(new ActionListener<>() { @Override - public void onResponse(FlushResponse flushResponse) { + public void onResponse(BroadcastResponse flushResponse) { try { // don't use assertAllSuccessful it uses a randomized context that belongs to a different thread assertThat( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java index e5a8246ba6033..70cd143686dc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -9,8 +9,8 @@ package org.elasticsearch.indices.mapping; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; @@ -82,7 +82,7 @@ public void testDynamicUpdates() throws Exception { indexRandom(true, false, indexRequests); logger.info("checking all the documents are there"); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh().get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); assertHitCount(prepareSearch("test").setSize(0), recCount); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java index 2935efb4808a7..22f987cc855cc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/memory/breaker/RandomExceptionCircuitBreakerIT.java @@ -15,9 +15,9 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Setting; @@ -134,7 +134,7 @@ public void testBreakerWithRandomExceptions() throws IOException, InterruptedExc } logger.info("Start Refresh"); // don't assert on failures here - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed: [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a98297e8b49ae..e70c48ce8184e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -31,6 +30,7 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -1138,7 +1138,7 @@ public void testFilterCacheStats() throws Exception { }); flush("index"); logger.info("--> force merging to a single segment"); - ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge("index").setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(forceMergeResponse); logger.info("--> refreshing"); refresh(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index d47c68690bab8..782aafece4399 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -8,11 +8,11 @@ package org.elasticsearch.recovery; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -405,7 +405,7 @@ private void logSearchResponse(int numberOfShards, long numberOfDocs, int iterat private void refreshAndAssert() throws Exception { assertBusy(() -> { - RefreshResponse actionGet = indicesAdmin().prepareRefresh().get(); + BroadcastResponse actionGet = indicesAdmin().prepareRefresh().get(); assertAllSuccessful(actionGet); }, 5, TimeUnit.MINUTES); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java index bd69aebcd415e..baa721cbbabd2 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/recovery/SimpleRecoveryIT.java @@ -9,12 +9,11 @@ package org.elasticsearch.recovery; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.xcontent.XContentType; @@ -43,12 +42,12 @@ public void testSimpleRecovery() throws Exception { NumShards numShards = getNumShards("test"); client().index(new IndexRequest("test").id("1").source(source("1", "test"), XContentType.JSON)).actionGet(); - FlushResponse flushResponse = indicesAdmin().flush(new FlushRequest("test")).actionGet(); + BroadcastResponse flushResponse = indicesAdmin().flush(new FlushRequest("test")).actionGet(); assertThat(flushResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(flushResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(flushResponse.getFailedShards(), equalTo(0)); client().index(new IndexRequest("test").id("2").source(source("2", "test"), XContentType.JSON)).actionGet(); - RefreshResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); + BroadcastResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); assertThat(refreshResponse.getTotalShards(), equalTo(numShards.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java index 97a400709cde7..68d00321848eb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWhileCreatingIndexIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.basic; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.index.query.QueryBuilders; @@ -55,7 +55,7 @@ private void searchWhileCreatingIndex(boolean createIndex, int numberOfReplicas) createIndex("test"); } prepareIndex("test").setId(id).setSource("field", "test").get(); - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").get(); // at least one shard should be successful when refreshing assertThat(refreshResponse.getSuccessfulShards(), greaterThanOrEqualTo(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java index 6ebfc61830269..6985ebb17386c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsIT.java @@ -14,8 +14,8 @@ import org.apache.lucene.tests.util.English; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -111,7 +111,7 @@ public void testRandomExceptions() throws IOException, InterruptedException, Exe } logger.info("Start Refresh"); // don't assert on failures here - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java index 33ef75b317e33..07d976437c24c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchWithRandomIOExceptionsIT.java @@ -13,8 +13,8 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -135,7 +135,7 @@ public void testRandomDirectoryIOExceptions() throws IOException, InterruptedExc ESIntegTestCase.NumShards numShards = getNumShards("test"); logger.info("Start Refresh"); // don't assert on failures here - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("test").execute().get(); final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0; logger.info( "Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java index c4b0346170949..303030a523662 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java 
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/TransportSearchFailuresIT.java @@ -12,10 +12,10 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Priority; @@ -50,7 +50,7 @@ public void testFailedSearchWithWrongQuery() throws Exception { for (int i = 0; i < 100; i++) { index(client(), Integer.toString(i), "test", i); } - RefreshResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); + BroadcastResponse refreshResponse = indicesAdmin().refresh(new RefreshRequest("test")).actionGet(); assertThat(refreshResponse.getTotalShards(), equalTo(test.totalNumShards)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(test.numPrimaries)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 81659323e2471..20c5c11f36756 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -12,13 +12,13 @@ import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.search.suggest.document.ContextSuggestField; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; import org.elasticsearch.action.admin.indices.segments.ShardSegments; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.settings.Settings; @@ -1267,7 +1267,7 @@ public void testPrunedSegments() throws IOException { .get(); // we have 2 docs in a segment... prepareIndex(INDEX).setId("2").setSource(jsonBuilder().startObject().field("somefield", "somevalue").endObject()).get(); - ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); + BroadcastResponse actionGet = indicesAdmin().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get(); assertAllSuccessful(actionGet); refresh(); // update the first one and then merge.. 
the target segment will have no value in FIELD diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index b126e4e51128f..df4d52727384f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -20,6 +19,7 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.common.settings.Settings; @@ -159,7 +159,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> force merging down to a single segment"); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); assertThat(forceMergeResponse.getFailedShards(), is(0)); final String snapshot2 = "snap-2"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index ed070c3224aa2..c13891728f315 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -21,11 +21,11 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.RestoreInProgress; @@ -119,7 +119,7 @@ public void testBasicWorkFlow() throws Exception { createIndexWithRandomDocs("test-idx-2", 100); createIndexWithRandomDocs("test-idx-3", 100); - ActionFuture flushResponseFuture = null; + ActionFuture 
flushResponseFuture = null; if (randomBoolean()) { ArrayList indicesToFlush = new ArrayList<>(); for (int i = 1; i < 4; i++) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java index 79a70969edaaf..7c8bb34a13917 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.cache.clear; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class ClearIndicesCacheAction extends ActionType { +public class ClearIndicesCacheAction extends ActionType { public static final ClearIndicesCacheAction INSTANCE = new ClearIndicesCacheAction(); public static final String NAME = "indices:admin/cache/clear"; private ClearIndicesCacheAction() { - super(NAME, ClearIndicesCacheResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java index 464c22d1119b0..fb6139c0ae4e3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequestBuilder.java @@ -9,11 +9,12 @@ package org.elasticsearch.action.admin.indices.cache.clear; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBuilder< ClearIndicesCacheRequest, - ClearIndicesCacheResponse, + BroadcastResponse, ClearIndicesCacheRequestBuilder> { public ClearIndicesCacheRequestBuilder(ElasticsearchClient client) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java deleted file mode 100644 index df0a298c87eeb..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.cache.clear; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * The response of a clear cache action. - */ -public class ClearIndicesCacheResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "clear_cache", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new ClearIndicesCacheResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - } - ); - - static { - declareBroadcastFields(PARSER); - } - - ClearIndicesCacheResponse(StreamInput in) throws IOException { - super(in); - } - - ClearIndicesCacheResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static ClearIndicesCacheResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java index 86f0093598744..faeaf0bdb575a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/TransportClearIndicesCacheAction.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -32,7 +33,7 @@ */ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction< ClearIndicesCacheRequest, - ClearIndicesCacheResponse, + BroadcastResponse, TransportBroadcastByNodeAction.EmptyResult> { private final IndicesService indicesService; @@ -64,11 +65,11 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected ResponseFactory getResponseFactory( + protected ResponseFactory getResponseFactory( ClearIndicesCacheRequest request, ClusterState clusterState ) { - return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ClearIndicesCacheResponse( + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java index 27d96e5feddd5..313fb23c45a6d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class FlushAction extends ActionType { +public class FlushAction extends ActionType { public static final FlushAction INSTANCE = new FlushAction(); public static final String NAME = "indices:admin/flush"; private FlushAction() { - super(NAME, FlushResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java index 64485ad0d4496..fc326f804ce8a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequest.java @@ -23,7 +23,6 @@ * memory heuristics in order to automatically trigger flush operations as required in order to clear memory. * * @see org.elasticsearch.client.internal.IndicesAdminClient#flush(FlushRequest) - * @see FlushResponse */ public class FlushRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java index 4e474732e3bad..f23e247428698 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushRequestBuilder.java @@ -9,9 +9,10 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; -public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { +public class FlushRequestBuilder extends BroadcastOperationRequestBuilder { public FlushRequestBuilder(ElasticsearchClient client) { super(client, FlushAction.INSTANCE, new FlushRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java deleted file mode 100644 index 0a037ebe09f8a..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * A response to flush action. - */ -public class FlushResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("flush", true, arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new FlushResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - }); - - static { - declareBroadcastFields(PARSER); - } - - FlushResponse(StreamInput in) throws IOException { - super(in); - } - - FlushResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static FlushResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index ade775db9c755..96b4a0191b10c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.client.internal.node.NodeClient; @@ -28,7 +29,7 @@ */ public class TransportFlushAction extends TransportBroadcastReplicationAction< FlushRequest, - FlushResponse, + BroadcastResponse, ShardFlushRequest, ReplicationResponse> { @@ -59,12 +60,12 @@ protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardI } @Override - protected FlushResponse newResponse( + protected BroadcastResponse newResponse( int successfulShards, int failedShards, int totalNumCopies, List shardFailures ) { - return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java index 3ab30298a57f5..1270365cded0d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.action.ActionType; +import 
org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class ForceMergeAction extends ActionType { +public class ForceMergeAction extends ActionType { public static final ForceMergeAction INSTANCE = new ForceMergeAction(); public static final String NAME = "indices:admin/forcemerge"; private ForceMergeAction() { - super(NAME, ForceMergeResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 241f1a0c7fbf6..37075dd896b80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -30,7 +30,6 @@ * to execute, and if so, executes it * * @see org.elasticsearch.client.internal.IndicesAdminClient#forceMerge(ForceMergeRequest) - * @see ForceMergeResponse */ public class ForceMergeRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java index 835749751f4a6..d4c15ee799670 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; /** @@ -20,7 +21,7 @@ */ public class ForceMergeRequestBuilder extends BroadcastOperationRequestBuilder< ForceMergeRequest, - ForceMergeResponse, + BroadcastResponse, ForceMergeRequestBuilder> { public ForceMergeRequestBuilder(ElasticsearchClient client) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java deleted file mode 100644 index 3853a944e8676..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponse.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.forcemerge; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * A response for force merge action. 
- */ -public class ForceMergeResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "force_merge", - true, - arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new ForceMergeResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - } - ); - - static { - declareBroadcastFields(PARSER); - } - - ForceMergeResponse(StreamInput in) throws IOException { - super(in); - } - - public ForceMergeResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static ForceMergeResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java index a70498695e149..df98e8f12f18e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/TransportForceMergeAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -35,7 +36,7 @@ */ public class TransportForceMergeAction extends TransportBroadcastByNodeAction< ForceMergeRequest, - ForceMergeResponse, + BroadcastResponse, TransportBroadcastByNodeAction.EmptyResult> { private final IndicesService indicesService; @@ -68,8 +69,8 @@ protected EmptyResult readShardResult(StreamInput in) throws IOException { } @Override - protected ResponseFactory getResponseFactory(ForceMergeRequest request, ClusterState clusterState) { - return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new ForceMergeResponse( + protected ResponseFactory getResponseFactory(ForceMergeRequest request, ClusterState clusterState) { + return (totalShards, successfulShards, failedShards, responses, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index 7d9ca67b9fa9e..f094ff75d9c41 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -9,13 +9,14 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class RefreshAction extends ActionType { +public class RefreshAction extends ActionType { public static final RefreshAction INSTANCE = new RefreshAction(); public static final String NAME = "indices:admin/refresh"; private RefreshAction() { - super(NAME, RefreshResponse::new); + super(NAME, BroadcastResponse::new); } 
} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index d0f9e99fd08ec..1f703e59980d6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -19,7 +19,6 @@ * default a refresh is scheduled periodically. * * @see org.elasticsearch.client.internal.IndicesAdminClient#refresh(RefreshRequest) - * @see RefreshResponse */ public class RefreshRequest extends BroadcastRequest { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java index 51d569dac0c30..c503ff6ca6930 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequestBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ElasticsearchClient; /** @@ -16,7 +17,7 @@ * capabilities depends on the index engine used. For example, the internal one requires refresh to be called, but by * default a refresh is scheduled periodically. */ -public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { +public class RefreshRequestBuilder extends BroadcastOperationRequestBuilder { public RefreshRequestBuilder(ElasticsearchClient client) { super(client, RefreshAction.INSTANCE, new RefreshRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java deleted file mode 100644 index 5669591a17dc7..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Arrays; -import java.util.List; - -/** - * The response of a refresh action. 
- */ -public class RefreshResponse extends BroadcastResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("refresh", true, arg -> { - BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0]; - return new RefreshResponse( - response.getTotalShards(), - response.getSuccessfulShards(), - response.getFailedShards(), - Arrays.asList(response.getShardFailures()) - ); - }); - - static { - declareBroadcastFields(PARSER); - } - - RefreshResponse(StreamInput in) throws IOException { - super(in); - } - - public RefreshResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } - - public static RefreshResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 7537e74e2c780..5d6f60216ae05 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.replication.BasicReplicationRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; @@ -29,7 +30,7 @@ */ public class TransportRefreshAction extends TransportBroadcastReplicationAction< RefreshRequest, - RefreshResponse, + BroadcastResponse, BasicReplicationRequest, ReplicationResponse> { @@ -62,12 +63,12 @@ protected BasicReplicationRequest newShardRequest(RefreshRequest request, ShardI } @Override - protected RefreshResponse newResponse( + protected BroadcastResponse newResponse( int successfulShards, int failedShards, int totalNumCopies, List shardFailures ) { - return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java index 52b4c00175fa8..b69b87190f2a7 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/BaseBroadcastResponse.java @@ -42,7 +42,7 @@ public class BaseBroadcastResponse extends ActionResponse { private final DefaultShardOperationFailedException[] shardFailures; @SuppressWarnings("unchecked") - protected static void declareBroadcastFields(ConstructingObjectParser PARSER) { + public static void declareBroadcastFields(ConstructingObjectParser PARSER) { ConstructingObjectParser shardsParser = new ConstructingObjectParser<>( "_shards", true, diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java index 
9ba26b95244ab..af3325dc6bd8f 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -30,10 +29,8 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -56,7 +53,6 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder; @@ -85,6 +81,7 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock; import org.elasticsearch.core.Nullable; @@ -261,7 +258,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The refresh request * @return The result future */ - ActionFuture refresh(RefreshRequest request); + ActionFuture refresh(RefreshRequest request); /** * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable). @@ -269,7 +266,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The refresh request * @param listener A listener to be notified with a result */ - void refresh(RefreshRequest request, ActionListener listener); + void refresh(RefreshRequest request, ActionListener listener); /** * Explicitly refresh one or more indices (making the content indexed since the last refresh searchable). 
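[Illustrative aside, not part of the patch: the hunks above only broaden the declared response types, so call sites migrate mechanically. A minimal blocking-caller sketch against the new refresh signature; the helper class, method name, and failure policy are assumptions for illustration.]

import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.client.internal.IndicesAdminClient;

class RefreshCaller {
    // refresh(...) now returns ActionFuture<BroadcastResponse>; the shard
    // accounting below is inherited from BaseBroadcastResponse and is unchanged.
    static void refreshAndAssert(IndicesAdminClient indicesAdmin, String index) {
        BroadcastResponse response = indicesAdmin.refresh(new RefreshRequest(index)).actionGet();
        if (response.getFailedShards() > 0) {
            throw new IllegalStateException("refresh failed on " + response.getFailedShards() + " shard(s)");
        }
    }
}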
@@ -282,7 +279,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The flush request * @return A result future */ - ActionFuture flush(FlushRequest request); + ActionFuture flush(FlushRequest request); /** * Explicitly flush one or more indices (releasing memory from the node). @@ -290,7 +287,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The flush request * @param listener A listener to be notified with a result */ - void flush(FlushRequest request, ActionListener listener); + void flush(FlushRequest request, ActionListener listener); /** * Explicitly flush one or more indices (releasing memory from the node). @@ -303,7 +300,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The optimize request * @return A result future */ - ActionFuture forceMerge(ForceMergeRequest request); + ActionFuture forceMerge(ForceMergeRequest request); /** * Explicitly force merge one or more indices into a the number of segments. @@ -311,7 +308,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The force merge request * @param listener A listener to be notified with a result */ - void forceMerge(ForceMergeRequest request, ActionListener listener); + void forceMerge(ForceMergeRequest request, ActionListener listener); /** * Explicitly force merge one or more indices into a the number of segments. @@ -436,7 +433,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The clear indices cache request * @return The result future */ - ActionFuture clearCache(ClearIndicesCacheRequest request); + ActionFuture clearCache(ClearIndicesCacheRequest request); /** * Clear indices cache. @@ -444,7 +441,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { * @param request The clear indices cache request * @param listener A listener to be notified with a result */ - void clearCache(ClearIndicesCacheRequest request, ActionListener listener); + void clearCache(ClearIndicesCacheRequest request, ActionListener listener); /** * Clear indices cache. 
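[Illustrative aside, not part of the patch: the listener-based variants follow the same pattern. A sketch against the signatures above for a caller that force-merges down to one segment; the class and method names are assumptions for illustration.]

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.client.internal.IndicesAdminClient;

class ForceMergeCaller {
    // forceMerge(...) now takes an ActionListener<BroadcastResponse>, so one
    // response-handling path can serve refresh, flush, force-merge and clear-cache.
    static void forceMergeToOneSegment(IndicesAdminClient indicesAdmin, String index) {
        ForceMergeRequest request = new ForceMergeRequest(index).maxNumSegments(1);
        indicesAdmin.forceMerge(request, ActionListener.wrap(response -> {
            if (response.getFailedShards() > 0) {
                throw new IllegalStateException("force merge failed on " + response.getFailedShards() + " shard(s)");
            }
        }, e -> { throw new RuntimeException("force merge request failed", e); }));
    }
}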
diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 12f3dec804809..f38cd7551dad7 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -129,7 +129,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheAction; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequestBuilder; -import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -144,11 +143,9 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder; @@ -179,7 +176,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.resolve.ResolveIndexAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -275,6 +271,7 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsAction; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; @@ -1118,7 +1115,7 @@ public GetAliasesRequestBuilder prepareGetAliases(String... aliases) { } @Override - public ActionFuture clearCache(final ClearIndicesCacheRequest request) { + public ActionFuture clearCache(final ClearIndicesCacheRequest request) { return execute(ClearIndicesCacheAction.INSTANCE, request); } @@ -1138,7 +1135,7 @@ public GetIndexRequestBuilder prepareGetIndex() { } @Override - public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { + public void clearCache(final ClearIndicesCacheRequest request, final ActionListener listener) { execute(ClearIndicesCacheAction.INSTANCE, request, listener); } @@ -1218,12 +1215,12 @@ public OpenIndexRequestBuilder prepareOpen(String... 
indices) { } @Override - public ActionFuture flush(final FlushRequest request) { + public ActionFuture flush(final FlushRequest request) { return execute(FlushAction.INSTANCE, request); } @Override - public void flush(final FlushRequest request, final ActionListener listener) { + public void flush(final FlushRequest request, final ActionListener listener) { execute(FlushAction.INSTANCE, request, listener); } @@ -1278,12 +1275,12 @@ public PutMappingRequestBuilder preparePutMapping(String... indices) { } @Override - public ActionFuture forceMerge(final ForceMergeRequest request) { + public ActionFuture forceMerge(final ForceMergeRequest request) { return execute(ForceMergeAction.INSTANCE, request); } @Override - public void forceMerge(final ForceMergeRequest request, final ActionListener listener) { + public void forceMerge(final ForceMergeRequest request, final ActionListener listener) { execute(ForceMergeAction.INSTANCE, request, listener); } @@ -1293,12 +1290,12 @@ public ForceMergeRequestBuilder prepareForceMerge(String... indices) { } @Override - public ActionFuture refresh(final RefreshRequest request) { + public ActionFuture refresh(final RefreshRequest request) { return execute(RefreshAction.INSTANCE, request); } @Override - public void refresh(final RefreshRequest request, final ActionListener listener) { + public void refresh(final RefreshRequest request, final ActionListener listener) { execute(RefreshAction.INSTANCE, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java index 4c9ac8fcb9a3c..815c3ce7e2c33 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestForceMergeAction.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; @@ -65,7 +65,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (validationException != null) { throw validationException; } - final var responseListener = new SubscribableListener(); + final var responseListener = new SubscribableListener(); final var task = client.executeLocally(ForceMergeAction.INSTANCE, mergeRequest, responseListener); responseListener.addListener(new LoggingTaskListener<>(task)); return sendTask(client.getLocalNodeId(), task); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java index cf238d57c4cab..97964b09593f5 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -9,8 +9,8 @@ package org.elasticsearch.rest.action.admin.indices; 
import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.core.RestApiVersion; @@ -55,14 +55,14 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel)); } - static final class SimulateSyncedFlushResponseListener extends RestBuilderListener { + static final class SimulateSyncedFlushResponseListener extends RestBuilderListener { SimulateSyncedFlushResponseListener(RestChannel channel) { super(channel); } @Override - public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception { + public RestResponse buildResponse(BroadcastResponse flushResponse, XContentBuilder builder) throws Exception { builder.startObject(); buildSyncedFlushResponse(builder, flushResponse); builder.endObject(); @@ -70,7 +70,7 @@ public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder b return new RestResponse(restStatus, builder); } - private static void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException { + private static void buildSyncedFlushResponse(XContentBuilder builder, BroadcastResponse flushResponse) throws IOException { builder.startObject("_shards"); builder.field("total", flushResponse.getTotalShards()); builder.field("successful", flushResponse.getSuccessfulShards()); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java deleted file mode 100644 index 4f3b14cd986c1..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.cache.clear; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class ClearIndicesCacheResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected ClearIndicesCacheResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected ClearIndicesCacheResponse doParseInstance(XContentParser parser) { - return ClearIndicesCacheResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java deleted file mode 100644 index 62611060ce25d..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class FlushResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected FlushResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new FlushResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected FlushResponse doParseInstance(XContentParser parser) { - return FlushResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java deleted file mode 100644 index ed1160edeb8f5..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeResponseTests.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.forcemerge; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class ForceMergeResponseTests extends AbstractBroadcastResponseTestCase { - @Override - protected ForceMergeResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new ForceMergeResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected ForceMergeResponse doParseInstance(XContentParser parser) { - return ForceMergeResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java deleted file mode 100644 index 5a3183b3e61b9..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.test.AbstractBroadcastResponseTestCase; -import org.elasticsearch.xcontent.XContentParser; - -import java.util.List; - -public class RefreshResponseTests extends AbstractBroadcastResponseTestCase { - - @Override - protected RefreshResponse createTestInstance( - int totalShards, - int successfulShards, - int failedShards, - List failures - ) { - return new RefreshResponse(totalShards, successfulShards, failedShards, failures); - } - - @Override - protected RefreshResponse doParseInstance(XContentParser parser) { - return RefreshResponse.fromXContent(parser); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index 8bda62b91bc7e..86749c26ba730 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionTestUtils; @@ -20,6 +19,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -286,9 +286,9 @@ protected void shardExecute( } } - public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) { + public BroadcastResponse assertImmediateResponse(String index, TransportFlushAction flushAction) { Date beginDate = new Date(); - FlushResponse flushResponse = ActionTestUtils.executeBlocking(flushAction, new FlushRequest(index)); + BroadcastResponse flushResponse = ActionTestUtils.executeBlocking(flushAction, new FlushRequest(index)); Date endDate = new Date(); long maxTime = 500; assertThat( diff --git a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java index 6a87c0f704600..6577148d78c7b 100644 --- a/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java +++ b/server/src/test/java/org/elasticsearch/index/fieldstats/FieldStatsProviderRefreshTests.java @@ -9,8 +9,8 @@ package org.elasticsearch.index.fieldstats; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndicesRequestCache; import org.elasticsearch.rest.RestStatus; @@ -88,7 +88,7 @@ private void assertRequestCacheStats(long expectedHits, long expectedMisses) { } private void refreshIndex() { - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh("index").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh("index").get(); assertThat(refreshResponse.getSuccessfulShards(), equalTo(refreshResponse.getSuccessfulShards())); } diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index b12bcd8b55880..a5ace3e357f90 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.NodeConnectionsService; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -466,7 +466,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) }, 60, TimeUnit.SECONDS); // Force merge to make sure that the resulting snapshot would contain the same index files as the safe commit - ForceMergeResponse forceMergeResponse = client().admin().indices().prepareForceMerge(indexName).setFlush(randomBoolean()).get(); + BroadcastResponse forceMergeResponse = 
            client().admin().indices().prepareForceMerge(indexName).setFlush(randomBoolean()).get();
        assertThat(forceMergeResponse.getTotalShards(), equalTo(forceMergeResponse.getSuccessfulShards()));

        // create repo

diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java
index 2a1cba66f79f9..b6415eea7db2c 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java
@@ -16,7 +16,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.tests.util.LuceneTestCase;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.network.InetAddresses;
@@ -164,7 +164,7 @@ public final void testSnapshotWithLargeSegmentFiles() throws Exception {
         }

         flushAndRefresh(index);
-        ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
+        BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
         assertThat(forceMerge.getSuccessfulShards(), equalTo(1));
         assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs);
@@ -191,7 +191,7 @@ public void testRequestStats() throws Exception {
         }

         flushAndRefresh(index);
-        ForceMergeResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
+        BroadcastResponse forceMerge = client().admin().indices().prepareForceMerge(index).setFlush(true).setMaxNumSegments(1).get();
         assertThat(forceMerge.getSuccessfulShards(), equalTo(1));
         assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs);

diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 175594ac8210f..65b28ad874431 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -34,10 +34,7 @@
 import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
 import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
-import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.admin.indices.segments.IndexSegments;
 import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
@@ -57,6 +54,7 @@
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.action.support.RefCountingListener;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.internal.AdminClient;
@@ -1478,9 +1476,9 @@ protected final DocWriteResponse index(String index, String id, String source) {
      *
      * @see #waitForRelocation()
      */
-    protected final RefreshResponse refresh(String... indices) {
+    protected final BroadcastResponse refresh(String... indices) {
         waitForRelocation();
-        RefreshResponse actionGet = indicesAdmin().prepareRefresh(indices)
+        BroadcastResponse actionGet = indicesAdmin().prepareRefresh(indices)
             .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED)
             .get();
         assertNoFailures(actionGet);
@@ -1498,9 +1496,9 @@ protected final void flushAndRefresh(String... indices) {
     /**
      * Flush some or all indices in the cluster.
      */
-    protected final FlushResponse flush(String... indices) {
+    protected final BroadcastResponse flush(String... indices) {
         waitForRelocation();
-        FlushResponse actionGet = indicesAdmin().prepareFlush(indices).get();
+        BroadcastResponse actionGet = indicesAdmin().prepareFlush(indices).get();
         for (DefaultShardOperationFailedException failure : actionGet.getShardFailures()) {
             assertThat("unexpected flush failure " + failure.reason(), failure.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
         }
@@ -1510,9 +1508,9 @@
     /**
      * Waits for all relocations and force merge all indices in the cluster to 1 segment.
      */
-    protected ForceMergeResponse forceMerge() {
+    protected BroadcastResponse forceMerge() {
         waitForRelocation();
-        ForceMergeResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get();
+        BroadcastResponse actionGet = indicesAdmin().prepareForceMerge().setMaxNumSegments(1).get();
         assertNoFailures(actionGet);
         return actionGet;
     }

diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
index a2806663ff321..a1af258784903 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
@@ -32,8 +32,9 @@
 import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
-import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
+import org.elasticsearch.action.support.broadcast.BaseBroadcastResponse;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
@@ -72,6 +73,7 @@
 import org.elasticsearch.index.seqno.ReplicationTracker;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xcontent.ConstructingObjectParser;
 import org.elasticsearch.xcontent.DeprecationHandler;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ToXContent;
@@ -1265,15 +1267,33 @@ protected void refreshAllIndices() throws IOException {
         client().performRequest(refreshRequest);
     }

-    protected static RefreshResponse refresh(String index) throws IOException {
+    protected static BroadcastResponse refresh(String index) throws IOException {
         return refresh(client(), index);
     }

-    protected static RefreshResponse refresh(RestClient client, String index) throws IOException {
+    private static final ConstructingObjectParser<BroadcastResponse, Void> BROADCAST_RESPONSE_PARSER = new ConstructingObjectParser<>(
+        "broadcast_response",
+        true,
+        arg -> {
+            BaseBroadcastResponse response = (BaseBroadcastResponse) arg[0];
+            return new BroadcastResponse(
+                response.getTotalShards(),
+                response.getSuccessfulShards(),
+                response.getFailedShards(),
+                Arrays.asList(response.getShardFailures())
+            );
+        }
+    );
+
+    static {
+        BaseBroadcastResponse.declareBroadcastFields(BROADCAST_RESPONSE_PARSER);
+    }
+
+    protected static BroadcastResponse refresh(RestClient client, String index) throws IOException {
         Request refreshRequest = new Request("POST", "/" + index + "/_refresh");
         Response response = client.performRequest(refreshRequest);
         try (var parser = responseAsParser(response)) {
-            return RefreshResponse.fromXContent(parser);
+            return BROADCAST_RESPONSE_PARSER.apply(parser, null);
         }
     }
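The lenient parser added above mirrors what RefreshResponse.fromXContent used to do, so the REST shape ("_shards": total/successful/failed) still round-trips. As a quick illustration, callers keep the same refresh(...) entry point; the following usage sketch is hypothetical (the test and index names are invented here, not part of the patch):

    public void testRefreshReturnsBroadcastResponse() throws IOException {
        // "test-index" is an assumed index name, for illustration only
        BroadcastResponse refreshed = refresh(client(), "test-index");
        assertEquals(0, refreshed.getFailedShards());
        assertTrue(refreshed.getSuccessfulShards() > 0);
    }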
indices) { + BroadcastResponse actionGet = client.admin().indices().prepareRefresh(indices).get(); assertNoFailures(actionGet); return actionGet; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java index 962e789cac7d6..b16983c6a7ac6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ForceMergeStepTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -70,13 +70,13 @@ public void testPerformActionComplete() throws Exception { Step.StepKey stepKey = randomStepKey(); StepKey nextStepKey = randomStepKey(); int maxNumSegments = randomIntBetween(1, 10); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); Mockito.doAnswer(invocationOnMock -> { ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(forceMergeResponse); return null; }).when(indicesClient).forceMerge(any(), any()); @@ -95,7 +95,7 @@ public void testPerformActionThrowsException() { Step.StepKey stepKey = randomStepKey(); StepKey nextStepKey = randomStepKey(); int maxNumSegments = randomIntBetween(1, 10); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.OK); Mockito.doAnswer(invocationOnMock -> { ForceMergeRequest request = (ForceMergeRequest) invocationOnMock.getArguments()[0]; @@ -103,7 +103,7 @@ public void testPerformActionThrowsException() { assertThat(request.indices()[0], equalTo(indexMetadata.getIndex().getName())); assertThat(request.maxNumSegments(), equalTo(maxNumSegments)); @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onFailure(exception); return null; }).when(indicesClient).forceMerge(any(), any()); @@ -126,7 +126,7 @@ public void testForcemergeFailsOnSomeShards() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); Index index = indexMetadata.getIndex(); - ForceMergeResponse forceMergeResponse = Mockito.mock(ForceMergeResponse.class); + BroadcastResponse forceMergeResponse = Mockito.mock(BroadcastResponse.class); 
Mockito.when(forceMergeResponse.getTotalShards()).thenReturn(numberOfShards); Mockito.when(forceMergeResponse.getFailedShards()).thenReturn(numberOfShards - 1); Mockito.when(forceMergeResponse.getStatus()).thenReturn(RestStatus.BAD_REQUEST); @@ -143,7 +143,7 @@ public void testForcemergeFailsOnSomeShards() { Mockito.doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(forceMergeResponse); return null; }).when(indicesClient).forceMerge(any(), any()); diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 5cceffd0f4818..f3bb43b9a3f38 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -17,12 +17,12 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; import org.elasticsearch.client.internal.Client; @@ -848,7 +848,7 @@ public void onFailure(Exception e) { /** * Updates the downsample target index metadata (task status) */ - class RefreshDownsampleIndexActionListener implements ActionListener { + class RefreshDownsampleIndexActionListener implements ActionListener { private final ActionListener actionListener; private final TaskId parentTask; @@ -868,7 +868,7 @@ class RefreshDownsampleIndexActionListener implements ActionListener fetchAllAuditMessages(String jobId) throws Exception { RefreshRequest refreshRequest = new RefreshRequest(NotificationsIndex.NOTIFICATIONS_INDEX); - RefreshResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); + BroadcastResponse refreshResponse = client().execute(RefreshAction.INSTANCE, refreshRequest).actionGet(); assertThat(refreshResponse.getStatus().getStatus(), anyOf(equalTo(200), equalTo(201))); SearchRequest searchRequest = new SearchRequestBuilder(client()).setIndices(NotificationsIndex.NOTIFICATIONS_INDEX) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java index 51f6243778517..ffe70d9747a56 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java +++ 
b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelProviderIT.java @@ -7,12 +7,12 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.license.License; @@ -109,7 +109,7 @@ public void testGetTrainedModelConfig() throws Exception { ); assertThat(exceptionHolder.get(), is(nullValue())); - AtomicReference refreshResponseAtomicReference = new AtomicReference<>(); + AtomicReference refreshResponseAtomicReference = new AtomicReference<>(); blockingCall( listener -> trainedModelProvider.refreshInferenceIndex(listener), refreshResponseAtomicReference, @@ -198,7 +198,7 @@ public void testGetTrainedModelConfigWithMultiDocDefinition() throws Exception { ); blockingCall( listener -> trainedModelProvider.refreshInferenceIndex(listener), - new AtomicReference(), + new AtomicReference(), new AtomicReference<>() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java index 7eef0e526eac3..2012ca87578b0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersister.java @@ -12,7 +12,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.license.License; @@ -157,7 +157,7 @@ private CountDownLatch storeTrainedModelDoc(TrainedModelDefinitionDoc trainedMod CountDownLatch latch = new CountDownLatch(1); // Latch is attached to this action as it is the last one to execute. - ActionListener refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> { + ActionListener refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> { if (refreshed != null) { LOGGER.debug(() -> "[" + analytics.getId() + "] refreshed inference index after model store"); } @@ -210,7 +210,7 @@ private CountDownLatch storeTrainedModelMetadata(TrainedModelMetadata trainedMod CountDownLatch latch = new CountDownLatch(1); // Latch is attached to this action as it is the last one to execute. 
- ActionListener refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> { + ActionListener refreshListener = new LatchedActionListener<>(ActionListener.wrap(refreshed -> { if (refreshed != null) { LOGGER.debug(() -> "[" + analytics.getId() + "] refreshed inference index after model metadata store"); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java index 1b6818a8727f3..0c693ff2d34f4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AbstractDataFrameAnalyticsStep.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.tasks.TaskId; @@ -76,7 +76,7 @@ public final void execute(ActionListener listener) { protected abstract void doExecute(ActionListener listener); - protected void refreshDestAsync(ActionListener refreshListener) { + protected void refreshDestAsync(ActionListener refreshListener) { ParentTaskAssigningClient parentTaskClient = parentTaskClient(); executeWithHeadersAsync( config.getHeaders(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java index 8adf5b3f0621a..9e56387ed773e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/AnalysisStep.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.ml.dataframe.steps; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; @@ -63,7 +63,7 @@ protected void doExecute(ActionListener listener) { listener::onFailure ); - ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { + ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { // TODO This could fail with errors. In that case we get stuck with the copied index. // We could delete the index in case of failure or we could try building the factory before reindexing // to catch the error early on. 
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java index 7b27090dc302d..dbf1f3e7be3d9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/FinalStep.java @@ -13,10 +13,10 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.TransportIndexAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContent; @@ -60,7 +60,7 @@ public Name name() { @Override protected void doExecute(ActionListener listener) { - ActionListener refreshListener = ActionListener.wrap( + ActionListener refreshListener = ActionListener.wrap( refreshResponse -> listener.onResponse(new StepResponse(false)), listener::onFailure ); @@ -89,7 +89,7 @@ private void indexDataCounts(ActionListener listener) { } } - private void refreshIndices(ActionListener listener) { + private void refreshIndices(ActionListener listener) { RefreshRequest refreshRequest = new RefreshRequest( AnomalyDetectorsIndex.jobStateIndexPattern(), MlStatsIndex.indexPattern(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java index 65ac2b678d93b..ad005e6d9ae6c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/InferenceStep.java @@ -11,9 +11,9 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -85,7 +85,7 @@ protected void doExecute(ActionListener listener) { } }, listener::onFailure); - ActionListener refreshDestListener = ActionListener.wrap( + ActionListener refreshDestListener = ActionListener.wrap( refreshResponse -> searchIfTestDocsExist(testDocsExistListener), listener::onFailure ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index d267966a1d795..dd6d498b425d5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -16,7 +16,6 @@ import 
org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; @@ -30,6 +29,7 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.CheckedBiFunction; import org.elasticsearch.common.Numbers; @@ -419,7 +419,7 @@ public void getTrainedModelMetadata( })); } - public void refreshInferenceIndex(ActionListener listener) { + public void refreshInferenceIndex(ActionListener listener) { executeAsyncWithOrigin( client, ML_ORIGIN, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java index ac16948e32ed6..577bbe3dac6ce 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobDataDeleter.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; @@ -23,6 +22,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -451,7 +451,7 @@ private void deleteResultsByQuery( ) { assert indices.length > 0; - ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { + ActionListener refreshListener = ActionListener.wrap(refreshResponse -> { logger.info("[{}] running delete by query on [{}]", jobId, String.join(", ", indices)); ConstantScoreQueryBuilder query = new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId)); DeleteByQueryRequest request = new DeleteByQueryRequest(indices).setQuery(query) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java index d9176b74d2d3f..c308f95d483a5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/ChunkedTrainedModelPersisterTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.ml.dataframe.process; import 
org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.license.License; import org.elasticsearch.test.ESTestCase; @@ -102,7 +102,7 @@ public void testPersistAllDocs() { }).when(trainedModelProvider).storeTrainedModelMetadata(any(TrainedModelMetadata.class), any(ActionListener.class)); doAnswer(invocationOnMock -> { - ActionListener storeListener = (ActionListener) invocationOnMock.getArguments()[0]; + ActionListener storeListener = (ActionListener) invocationOnMock.getArguments()[0]; storeListener.onResponse(null); return null; }).when(trainedModelProvider).refreshInferenceIndex(any(ActionListener.class)); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index 68b5b8953ccb7..bf979f9deabf0 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -11,12 +11,12 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.scheduler.SchedulerEngine; @@ -164,10 +164,10 @@ protected void doSaveState(IndexerState indexerState, Map positi @Override protected void onFinish(ActionListener listener) { final RollupJobConfig jobConfig = job.getConfig(); - final ActionListener refreshResponseActionListener = new ActionListener<>() { + final ActionListener refreshResponseActionListener = new ActionListener<>() { @Override - public void onResponse(RefreshResponse refreshResponse) { + public void onResponse(BroadcastResponse refreshResponse) { logger.trace("refreshing rollup index {} successful for job {}", jobConfig.getRollupIndex(), jobConfig.getId()); listener.onResponse(null); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 7fcde59f73088..430ba6d6faec5 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -11,10 +11,10 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; +import 
org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.scheduler.SchedulerEngine; import org.elasticsearch.common.settings.Settings; @@ -590,7 +590,7 @@ public void testTriggerWithoutHeaders() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -697,7 +697,7 @@ public void testTriggerWithHeaders() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -806,7 +806,7 @@ public void testSaveStateChangesIDScheme() throws Exception { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), headers); Client client = mock(Client.class); doAnswer(invocationOnMock -> { - RefreshResponse r = new RefreshResponse(2, 2, 0, Collections.emptyList()); + BroadcastResponse r = new BroadcastResponse(2, 2, 0, Collections.emptyList()); ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(r); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java index 7ee81b444af46..a31d016c143ae 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.Metadata; @@ -31,7 +32,6 @@ import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; -import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheResponse; import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction; import 
org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsRequest; import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsResponse; @@ -121,11 +121,11 @@ public void testStatsRequiresLicense() throws ExecutionException, InterruptedExc } public void testClearCacheRequiresLicense() throws ExecutionException, InterruptedException { - final ActionFuture future = client().execute( + final ActionFuture future = client().execute( ClearSearchableSnapshotsCacheAction.INSTANCE, new ClearSearchableSnapshotsCacheRequest(indexName) ); - final ClearSearchableSnapshotsCacheResponse response = future.get(); + final BroadcastResponse response = future.get(); assertThat(response.getTotalShards(), greaterThan(0)); assertThat(response.getSuccessfulShards(), equalTo(0)); for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java index 37b3ecfd36959..c1c40acbd43c5 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheIntegTests.java @@ -8,10 +8,9 @@ package org.elasticsearch.xpack.searchablesnapshots.cache.blob; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -139,7 +138,7 @@ public void testBlobStoreCache() throws Exception { if (randomBoolean()) { logger.info("--> force-merging index before snapshotting"); - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).get(); + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).get(); assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(numberOfShards.totalNumShards)); assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); } @@ -355,7 +354,7 @@ private Client systemClient() { private void refreshSystemIndex() { try { - final RefreshResponse refreshResponse = systemClient().admin().indices().prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX).get(); + final BroadcastResponse refreshResponse = systemClient().admin().indices().prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX).get(); assertThat(refreshResponse.getSuccessfulShards(), greaterThan(0)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); } catch (IndexNotFoundException indexNotFoundException) { diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 981ffe2832e66..56074f97650f0 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -10,9 +10,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -329,7 +329,7 @@ private long numberOfEntriesInCache() { private void refreshSystemIndex(boolean failIfNotExist) { try { - final RefreshResponse refreshResponse = systemClient().admin() + final BroadcastResponse refreshResponse = systemClient().admin() .indices() .prepareRefresh(SNAPSHOT_BLOB_CACHE_INDEX) .setIndicesOptions(failIfNotExist ? RefreshRequest.DEFAULT_INDICES_OPTIONS : IndicesOptions.LENIENT_EXPAND_OPEN) diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java index 42ac63579b6c6..b260f6cf2a891 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/NodesCachesStatsIntegTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.searchablesnapshots.cache.shared; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.blobcache.shared.SharedBlobCacheService; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.ShardRouting; @@ -22,7 +23,6 @@ import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; -import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheResponse; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodeCachesStatsResponse; import org.elasticsearch.xpack.searchablesnapshots.action.cache.TransportSearchableSnapshotsNodeCachesStatsAction.NodesCachesStatsResponse; @@ -117,7 +117,7 @@ public void testNodesCachesStats() throws Exception { 
assertExecutorIsIdle(SearchableSnapshots.CACHE_FETCH_ASYNC_THREAD_POOL_NAME); - final ClearSearchableSnapshotsCacheResponse clearCacheResponse = client().execute( + final BroadcastResponse clearCacheResponse = client().execute( ClearSearchableSnapshotsCacheAction.INSTANCE, new ClearSearchableSnapshotsCacheRequest(mountedIndex) ).actionGet(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java index 9628bc75cd337..f57761b806599 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java @@ -7,13 +7,14 @@ package org.elasticsearch.xpack.searchablesnapshots.action; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; -public class ClearSearchableSnapshotsCacheAction extends ActionType { +public class ClearSearchableSnapshotsCacheAction extends ActionType { public static final ClearSearchableSnapshotsCacheAction INSTANCE = new ClearSearchableSnapshotsCacheAction(); static final String NAME = "cluster:admin/xpack/searchable_snapshots/cache/clear"; private ClearSearchableSnapshotsCacheAction() { - super(NAME, ClearSearchableSnapshotsCacheResponse::new); + super(NAME, BroadcastResponse::new); } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java deleted file mode 100644 index 23a566f23d71b..0000000000000 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ -package org.elasticsearch.xpack.searchablesnapshots.action; - -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.List; - -public class ClearSearchableSnapshotsCacheResponse extends BroadcastResponse { - - ClearSearchableSnapshotsCacheResponse(StreamInput in) throws IOException { - super(in); - } - - ClearSearchableSnapshotsCacheResponse( - int totalShards, - int successfulShards, - int failedShards, - List shardFailures - ) { - super(totalShards, successfulShards, failedShards, shardFailures); - } -} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java index 8a4d21b4a98b8..077ee165d58ef 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.searchablesnapshots.action; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction.EmptyResult; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -24,7 +25,7 @@ public class TransportClearSearchableSnapshotsCacheAction extends AbstractTransportSearchableSnapshotsAction< ClearSearchableSnapshotsCacheRequest, - ClearSearchableSnapshotsCacheResponse, + BroadcastResponse, EmptyResult> { @Inject @@ -56,11 +57,11 @@ protected EmptyResult readShardResult(StreamInput in) { } @Override - protected ResponseFactory getResponseFactory( + protected ResponseFactory getResponseFactory( ClearSearchableSnapshotsCacheRequest request, ClusterState clusterState ) { - return (totalShards, successfulShards, failedShards, emptyResults, shardFailures) -> new ClearSearchableSnapshotsCacheResponse( + return (totalShards, successfulShards, failedShards, emptyResults, shardFailures) -> new BroadcastResponse( totalShards, successfulShards, failedShards, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java index 1e1d8a7f0654c..3fbcd00690e82 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DlsFlsRequestCacheTests.java @@ -9,9 +9,8 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.SecureString; @@ -377,14 +376,14 @@ private void prepareIndices() { assertCacheState(DLS_TEMPLATE_ROLE_QUERY_INDEX, 0, 0); // Force merge the index to ensure there can be no background merges during the subsequent searches that would invalidate the cache - final ForceMergeResponse forceMergeResponse = indicesAdmin().prepareForceMerge( + final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge( DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX ).setFlush(true).get(); ElasticsearchAssertions.assertAllSuccessful(forceMergeResponse); - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX) + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX) .get(); assertThat(refreshResponse.getFailedShards(), equalTo(0)); ensureGreen(DLS_INDEX, FLS_INDEX, INDEX, DLS_TEMPLATE_ROLE_QUERY_INDEX); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 1329158f57d4d..a693c192f5fd2 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.TransportGetAction; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -810,7 +810,7 @@ private void doTestDeletionBehaviorWhenKeysBecomeInvalidBeforeAndAfterRetentionP private void refreshSecurityIndex() throws Exception { assertBusy(() -> { - final RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(SECURITY_MAIN_ALIAS).get(); + final BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(SECURITY_MAIN_ALIAS).get(); assertThat(refreshResponse.getFailedShards(), is(0)); }); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index a088e6c61822a..3386c1d7930b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import 
org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -34,6 +33,7 @@ import org.elasticsearch.action.search.TransportSearchScrollAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Client; @@ -245,7 +245,7 @@ protected void listener.onResponse((Response) response); } else if (RefreshAction.NAME.equals(action.name())) { assertThat(request, instanceOf(RefreshRequest.class)); - listener.onResponse((Response) mock(RefreshResponse.class)); + listener.onResponse((Response) mock(BroadcastResponse.class)); } else { super.doExecute(action, request, listener); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java index 843dee43706f8..1d44ed5a1f8ef 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/IndexBasedTransformConfigManager.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.TransportIndexAction; @@ -27,6 +26,7 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -848,7 +848,7 @@ public void refresh(ActionListener listener) { client.threadPool().getThreadContext(), TRANSFORM_ORIGIN, new RefreshRequest(TransformInternalIndexConstants.LATEST_INDEX_NAME), - ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure), + ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure), client.admin().indices()::refresh ); } diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java index d1153b6eca3e6..265b252082c68 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/test/integration/SingleNodeTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.watcher.test.integration; import 
org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse; import org.elasticsearch.rest.RestStatus; @@ -65,7 +65,7 @@ public void testThatLoadingWithNonExistingIndexWorks() throws Exception { ensureGreen(HistoryStoreField.DATA_STREAM); assertBusy(() -> { - RefreshResponse refreshResponse = indicesAdmin().prepareRefresh(".watcher-history*").get(); + BroadcastResponse refreshResponse = indicesAdmin().prepareRefresh(".watcher-history*").get(); assertThat(refreshResponse.getStatus(), equalTo(RestStatus.OK)); assertResponse( prepareSearch(".watcher-history*").setSize(0), diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index ea9295600fe41..a067b99c6bff0 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -10,11 +10,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -404,7 +404,7 @@ private Collection loadWatches(ClusterState clusterState) { // Non private for unit testing purposes void refreshWatches(IndexMetadata indexMetadata) { - RefreshResponse refreshResponse = client.admin() + BroadcastResponse refreshResponse = client.admin() .indices() .refresh(new RefreshRequest(INDEX)) .actionGet(TimeValue.timeValueSeconds(5)); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index c2ed68d8fa1bd..59e646654a18c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequest; @@ -21,6 +20,7 @@ import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import 
org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -162,12 +162,12 @@ void stopExecutor() {} ClusterState clusterState = csBuilder.build(); // response setup, successful refresh response - RefreshResponse refreshResponse = mock(RefreshResponse.class); + BroadcastResponse refreshResponse = mock(BroadcastResponse.class); when(refreshResponse.getSuccessfulShards()).thenReturn( clusterState.getMetadata().getIndices().get(Watch.INDEX).getNumberOfShards() ); doAnswer(invocation -> { - ActionListener listener = (ActionListener) invocation.getArguments()[2]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(refreshResponse); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(RefreshRequest.class), anyActionListener()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 60fa2581b4218..ee200dd7912c5 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; -import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor2; @@ -24,6 +23,7 @@ import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -202,7 +202,7 @@ public void testFindTriggeredWatchesGoodCase() { doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onResponse(mockRefreshResponse(1, 1)); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -409,7 +409,7 @@ public void testIndexNotFoundButInMetadata() { doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[2]; + ActionListener listener = (ActionListener) invocation.getArguments()[2]; listener.onFailure(new IndexNotFoundException(TriggeredWatchStoreField.INDEX_NAME)); return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(), any()); @@ -507,8 +507,8 @@ public void testDeleteTriggeredWatches() throws Exception { assertThat(response.getItems().length, is(1)); } - private RefreshResponse mockRefreshResponse(int total, int successful) { - RefreshResponse refreshResponse = mock(RefreshResponse.class); + private BroadcastResponse mockRefreshResponse(int total, int successful) { + BroadcastResponse refreshResponse = mock(BroadcastResponse.class); when(refreshResponse.getTotalShards()).thenReturn(total); 
when(refreshResponse.getSuccessfulShards()).thenReturn(successful); return refreshResponse; From 8e1f2148eb6f9c0f4c21603f383125a003059986 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 16 Jan 2024 17:13:27 +0000 Subject: [PATCH 51/95] [ML] Deprecate machine learning on Intel macOS (#104087) PyTorch is no longer going to provide macOS x86_64 builds after version 2.2. This doesn't instantly affect us, as we build PyTorch from source ourselves and they've said that they will not deliberately break macOS x86_64. They just won't build or test on that platform themselves. As a result it's inevitable that we'll have to make some tweaks to the PyTorch code to get it to build, and as the years go by it will become harder and harder to make the code compile on an unsupported platform. Since PyTorch is such a critical component of Elastic ML we won't be able to keep it running on macOS x86_64 for more than a few releases after PyTorch drops support. This change gives notice of our intentions. --- docs/changelog/104087.yaml | 13 +++++++++++++ .../org/elasticsearch/xpack/ml/MachineLearning.java | 13 +++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 docs/changelog/104087.yaml diff --git a/docs/changelog/104087.yaml b/docs/changelog/104087.yaml new file mode 100644 index 0000000000000..614e2d0de7e58 --- /dev/null +++ b/docs/changelog/104087.yaml @@ -0,0 +1,13 @@ +pr: 104087 +summary: Deprecate machine learning on Intel macOS +area: Machine Learning +type: deprecation +issues: [] +deprecation: + title: Deprecate machine learning on Intel macOS + area: Packaging + details: The machine learning plugin will be permanently disabled on macOS x86_64 + in new minor versions released from December 2024 onwards. + impact: To continue to use machine learning functionality on macOS please switch to + an arm64 machine (Apple silicon). Alternatively, it will still be possible to run + Elasticsearch with machine learning enabled in a Docker container on macOS x86_64. 
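The accompanying code change is a one-time platform gate when the plugin creates its components. Condensed from the MachineLearning.java diff that follows, for readability (a sketch, not the actual change: the full warning text is elided and the gate is wrapped in a hypothetical holder class so it compiles standalone):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.plugins.Platforms;

// Hypothetical holder class for illustration; in the diff below this logic
// lives inline in MachineLearning#createComponents.
class MlPlatformDeprecationSketch {
    private static final Logger logger = LogManager.getLogger(MlPlatformDeprecationSketch.class);
    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MlPlatformDeprecationSketch.class);

    static void warnIfDeprecatedPlatform() {
        // On Intel macOS, emit both a plain WARN log and a deprecation warning.
        if ("darwin-x86_64".equals(Platforms.PLATFORM_NAME)) {
            String msg = "The machine learning plugin will be permanently disabled on macOS x86_64 ..."; // full text in the diff
            logger.warn(msg);
            deprecationLogger.warn(DeprecationCategory.PLUGINS, "ml-darwin-x86_64", msg);
        }
    }
}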
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 09cb8644dba4f..152d8fde8c86c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -32,6 +32,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -68,6 +70,7 @@ import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Platforms; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.ShutdownAwarePlugin; @@ -753,6 +756,7 @@ public void loadExtensions(ExtensionLoader loader) { public static final int MAX_LOW_PRIORITY_MODELS_PER_NODE = 100; private static final Logger logger = LogManager.getLogger(MachineLearning.class); + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(MachineLearning.class); private final Settings settings; private final boolean enabled; @@ -919,6 +923,15 @@ public Collection createComponents(PluginServices services) { return List.of(new JobManagerHolder(), new MachineLearningExtensionHolder()); } + if ("darwin-x86_64".equals(Platforms.PLATFORM_NAME)) { + String msg = "The machine learning plugin will be permanently disabled on macOS x86_64 in new minor versions released " + + "from December 2024 onwards. To continue to use machine learning functionality on macOS please switch to an arm64 " + + "machine (Apple silicon). Alternatively, it will still be possible to run Elasticsearch with machine learning " + + "enabled in a Docker container on macOS x86_64."; + logger.warn(msg); + deprecationLogger.warn(DeprecationCategory.PLUGINS, "ml-darwin-x86_64", msg); + } + machineLearningExtension.get().configure(environment.settings()); this.mlUpgradeModeActionFilter.set(new MlUpgradeModeActionFilter(clusterService)); From 81ec404f56f92b395027762fe7956b675446357a Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Tue, 16 Jan 2024 17:59:51 +0000 Subject: [PATCH 52/95] Update ExecutorScalingQueue to work around LinkedTransferQueue JDK bug (#104347) This commit adds a few overrides to ExecutorScalingQueue (subclass of LinkedTransferQueue) to work around a JDK bug in LinkedTransferQueue.
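To make the failure mode concrete: ExecutorScalingQueue overrides offer(E) so that it can report "full" and make the ThreadPoolExecutor spawn a worker rather than queue the task, and per the linked JDK issue (JDK-8323659, introduced in JDK 21.0.2) the put, add and timed offer methods of LinkedTransferQueue route through that overridable offer(E), so they would inherit the rejection even though the queue is unbounded. A minimal sketch of the pattern and the workaround follows; the scaling condition is a stand-in for the real check (which consults the owning executor), not the actual Elasticsearch implementation:

import java.util.concurrent.LinkedTransferQueue;
import java.util.concurrent.TimeUnit;

// Sketch only: a scaling queue whose offer(E) can return false on purpose,
// plus the workaround of pinning put/add/offer(timeout) to super.offer(E).
class ScalingQueueSketch<E> extends LinkedTransferQueue<E> {

    // Stand-in for the real condition, which consults the owning executor.
    protected boolean shouldRejectToGrowPool() {
        return true;
    }

    @Override
    public boolean offer(E e) {
        if (tryTransfer(e)) {
            return true; // handed straight to an idle worker
        }
        if (shouldRejectToGrowPool()) {
            return false; // tells the executor to add a thread
        }
        return super.offer(e);
    }

    @Override
    public void put(E e) {
        super.offer(e); // unbounded queue: always succeeds
    }

    @Override
    public boolean add(E e) {
        return super.offer(e); // never false for an unbounded queue
    }

    @Override
    public boolean offer(E e, long timeout, TimeUnit unit) {
        return super.offer(e); // no need to wait on an unbounded queue
    }
}

When offer(E) returns false the executor either adds a worker or falls back to its rejection handler, which in Elasticsearch's scaling executors force-queues the task; put and add, by contrast, must always succeed on an unbounded queue, hence the unconditional super.offer(E).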
--- .../common/util/concurrent/EsExecutors.java | 23 +++++++++++ .../util/concurrent/EsExecutorsTests.java | 38 +++++++++++++++++++ .../concurrent/ExecutorScalingQueueTests.java | 35 +++++++++++++++++ 3 files changed, 96 insertions(+) create mode 100644 server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index d7774d5c0a7ea..e6865e5c66e74 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -329,6 +329,29 @@ public boolean offer(E e) { } } + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public void put(E e) { + // As the queue is unbounded, this method will always add to the queue. + super.offer(e); + } + + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public boolean add(E e) { + // As the queue is unbounded, this method will never return false. + return super.offer(e); + } + + // Overridden to workaround a JDK bug introduced in JDK 21.0.2 + // https://bugs.openjdk.org/browse/JDK-8323659 + @Override + public boolean offer(E e, long timeout, TimeUnit unit) { + // As the queue is unbounded, this method will never return false. + return super.offer(e); + } } /** diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index fb9bde31e8fc4..cb1dddd7c51f3 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.Matcher; import java.util.Locale; @@ -501,4 +502,41 @@ public void testNodeProcessorsFloatValidation() { } } + // This test must complete to ensure that our basic infrastructure is working as expected. + // Specifically that ExecutorScalingQueue, which subclasses LinkedTransferQueue, correctly + // tracks tasks submitted to the executor. 
+ public void testBasicTaskExecution() { + final var executorService = EsExecutors.newScaling( + "test", + 0, + between(1, 5), + 60, + TimeUnit.SECONDS, + randomBoolean(), + EsExecutors.daemonThreadFactory("test"), + new ThreadContext(Settings.EMPTY) + ); + try { + final var countDownLatch = new CountDownLatch(between(1, 10)); + class TestTask extends AbstractRunnable { + @Override + protected void doRun() { + countDownLatch.countDown(); + if (countDownLatch.getCount() > 0) { + executorService.execute(TestTask.this); + } + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + } + + executorService.execute(new TestTask()); + safeAwait(countDownLatch); + } finally { + ThreadPool.terminate(executorService, 10, TimeUnit.SECONDS); + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java new file mode 100644 index 0000000000000..b1e1b9d620d2a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/ExecutorScalingQueueTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.util.concurrent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.TimeUnit; + +public class ExecutorScalingQueueTests extends ESTestCase { + + public void testPut() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + queue.put(new Object()); + assertEquals(queue.size(), 1); + } + + public void testAdd() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + assertTrue(queue.add(new Object())); + assertEquals(queue.size(), 1); + } + + public void testTimedOffer() { + var queue = new EsExecutors.ExecutorScalingQueue<>(); + assertTrue(queue.offer(new Object(), 60, TimeUnit.SECONDS)); + assertEquals(queue.size(), 1); + } + +} From 7b1db0d988469694c6a0e53d3814ef7fb119065f Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 16 Jan 2024 10:06:59 -0800 Subject: [PATCH 53/95] Mute SymbolicLinkPreservingTarFuncTest --- .../gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy index beac84876ed71..3b05a2753f216 100644 --- a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy @@ -8,6 +8,8 @@ package org.elasticsearch.gradle.internal +import spock.lang.Ignore + import org.apache.commons.compress.archivers.tar.TarArchiveEntry import org.apache.commons.compress.archivers.tar.TarArchiveInputStream import org.apache.commons.compress.compressors.bzip2.BZip2CompressorInputStream @@ -21,6 +23,7 @@ import java.nio.file.Path import java.nio.file.Paths import java.util.function.Function +@Ignore("https://github.com/elastic/elasticsearch/issues/104428") class 
SymbolicLinkPreservingTarFuncTest extends AbstractGradleFuncTest { def setup() { From 24c4bca3320379592e40fa7ca2da72ca63a6982a Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Tue, 16 Jan 2024 20:27:23 +0100 Subject: [PATCH 54/95] Fix routing_path when template has multiple path_match and multi-fields (#104418) --- docs/changelog/104418.yaml | 6 +++ .../DataStreamIndexSettingsProvider.java | 16 +++--- .../DataStreamIndexSettingsProviderTests.java | 49 +++++++++++++++++++ 3 files changed, 65 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/104418.yaml diff --git a/docs/changelog/104418.yaml b/docs/changelog/104418.yaml new file mode 100644 index 0000000000000..d27b66cebea87 --- /dev/null +++ b/docs/changelog/104418.yaml @@ -0,0 +1,6 @@ +pr: 104418 +summary: Fix `routing_path` when template has multiple `path_match` and multi-fields +area: TSDB +type: bug +issues: + - 104400 diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 519499addd77e..694e015b602f8 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -31,7 +31,7 @@ import java.io.UncheckedIOException; import java.time.Instant; import java.util.ArrayList; -import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; @@ -177,14 +177,18 @@ private List findRoutingPaths(String indexName, Settings allSettings, Li } MappingParserContext parserContext = mapperService.parserContext(); - for (String pathMatch : template.pathMatch()) { + for (Iterator iterator = template.pathMatch().iterator(); iterator.hasNext();) { var mapper = parserContext.typeParser(mappingSnippetType) - // Since FieldMapper.parse modifies the Map passed in (removing entries for "type"), that means - // that only the first pathMatch passed in gets recognized as a time_series_dimension. To counteract - // that, we wrap the mappingSnippet in a new HashMap for each pathMatch instance. - .parse(pathMatch, new HashMap<>(mappingSnippet), parserContext) + .parse(iterator.next(), mappingSnippet, parserContext) .build(MapperBuilderContext.root(false, false)); extractPath(routingPaths, mapper); + if (iterator.hasNext()) { + // Since FieldMapper.parse modifies the Map passed in (removing entries for "type"), that means + // that only the first pathMatch passed in gets recognized as a time_series_dimension. + // To avoid this, each parsing call uses a new mapping snippet. + // Note that a shallow copy of the mappingSnippet map is not enough if there are multi-fields. 
+ mappingSnippet = template.mappingForName(templateName, KeywordFieldMapper.CONTENT_TYPE); + } } } return routingPaths; diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 62d07467d5086..db0e3e5cd6258 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -493,6 +493,55 @@ public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntri assertEquals(3, routingPathList.size()); } + public void testGenerateRoutingPathFromDynamicTemplateWithMultiplePathMatchEntriesMultiFields() throws Exception { + Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); + String mapping = """ + { + "_doc": { + "dynamic_templates": [ + { + "labels": { + "path_match": ["xprometheus.labels.*", "yprometheus.labels.*"], + "mapping": { + "type": "keyword", + "time_series_dimension": true, + "fields": { + "text": { + "type": "text" + } + } + } + } + } + ], + "properties": { + "host": { + "properties": { + "id": { + "type": "keyword", + "time_series_dimension": true + } + } + }, + "another_field": { + "type": "keyword" + } + } + } + } + """; + Settings result = generateTsdbSettings(mapping, now); + assertThat(result.size(), equalTo(3)); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat( + IndexMetadata.INDEX_ROUTING_PATH.get(result), + containsInAnyOrder("host.id", "xprometheus.labels.*", "yprometheus.labels.*") + ); + List routingPathList = IndexMetadata.INDEX_ROUTING_PATH.get(result); + assertEquals(3, routingPathList.size()); + } + public void testGenerateRoutingPathFromDynamicTemplate_templateWithNoPathMatch() throws Exception { Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); String mapping = """ From a3c2b2d611579ac823c00ee427aff13ff1778161 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Jan 2024 20:22:33 +0000 Subject: [PATCH 55/95] Add stable master indicator troubleshooting links (#104395) Adds links from the stable master health indicator to the relevant troubleshooting docs, as well as making the "contact support" link a versioned link directly to the right subsection of the troubleshooting docs page. --- docs/reference/troubleshooting.asciidoc | 9 ++- .../StableMasterHealthIndicatorService.java | 57 ++++++++++++++----- .../elasticsearch/common/ReferenceDocs.java | 1 + .../common/reference-docs-links.json | 3 +- ...ableMasterHealthIndicatorServiceTests.java | 13 +++-- 5 files changed, 61 insertions(+), 22 deletions(-) diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index e5ad75e048c1b..de1f9e6c7a608 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -58,9 +58,14 @@ fix problems that an {es} deployment might encounter. 
* <> * <> -If none of these solutions relate to your issue, you can still get help: +[discrete] +[[troubleshooting-contact-support]] +=== Contact us + +If none of these guides relate to your issue, or you need further assistance, +then you can contact us as follows: -* For users with an active subscription, you can get help in several ways: +* If you have an active subscription, you have several options: ** Go directly to the http://support.elastic.co[Support Portal] diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java index 56289ab348a3a..80b4b455912e7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.core.Nullable; import org.elasticsearch.health.Diagnosis; import org.elasticsearch.health.HealthIndicatorDetails; @@ -39,14 +40,36 @@ public class StableMasterHealthIndicatorService implements HealthIndicatorService { public static final String NAME = "master_is_stable"; - public static final String GET_HELP_GUIDE = "https://ela.st/getting-help"; + + public static final Diagnosis TROUBLESHOOT_DISCOVERY = new Diagnosis( + new Diagnosis.Definition( + NAME, + "troubleshoot_discovery", + "The Elasticsearch cluster does not have a stable master node.", + "See discovery troubleshooting guidance at " + ReferenceDocs.DISCOVERY_TROUBLESHOOTING, + ReferenceDocs.DISCOVERY_TROUBLESHOOTING.toString() + ), + null + ); + + public static final Diagnosis TROUBLESHOOT_UNSTABLE_CLUSTER = new Diagnosis( + new Diagnosis.Definition( + NAME, + "troubleshoot_unstable_cluster", + "The Elasticsearch cluster does not have a stable master node.", + "See unstable cluster troubleshooting guidance at " + ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING, + ReferenceDocs.UNSTABLE_CLUSTER_TROUBLESHOOTING.toString() + ), + null + ); + public static final Diagnosis CONTACT_SUPPORT = new Diagnosis( new Diagnosis.Definition( NAME, "contact_support", "The Elasticsearch cluster does not have a stable master node.", - "Get help at " + GET_HELP_GUIDE, - GET_HELP_GUIDE + "Get help at " + ReferenceDocs.CONTACT_SUPPORT, + ReferenceDocs.CONTACT_SUPPORT.toString() ), null ); @@ -67,12 +90,13 @@ public class StableMasterHealthIndicatorService implements HealthIndicatorServic public static final String BACKUP_DISABLED_IMPACT_ID = "backup_disabled"; // Impacts of having an unstable master: - private static final String UNSTABLE_MASTER_INGEST_IMPACT = "The cluster cannot create, delete, or rebalance indices, and cannot " - + "insert or update documents."; - private static final String UNSTABLE_MASTER_DEPLOYMENT_MANAGEMENT_IMPACT = "Scheduled tasks such as Watcher, Index Lifecycle " - + "Management, and Snapshot Lifecycle Management will not work. The _cat APIs will not work."; - private static final String UNSTABLE_MASTER_BACKUP_IMPACT = "Snapshot and restore will not work, your data will not be backed up. 
" - + "Searchable snapshots cannot be mounted."; + private static final String UNSTABLE_MASTER_INGEST_IMPACT = """ + The cluster cannot create, delete, or rebalance indices, and cannot insert or update documents."""; + private static final String UNSTABLE_MASTER_DEPLOYMENT_MANAGEMENT_IMPACT = """ + Scheduled tasks such as Watcher, Index Lifecycle Management, and Snapshot Lifecycle Management will not work. \ + The _cat APIs will not work."""; + private static final String UNSTABLE_MASTER_BACKUP_IMPACT = """ + Snapshot and restore will not work. Your data will not be backed up, and searchable snapshots cannot be mounted."""; /** * This is the list of the impacts to be reported when the master node is determined to be unstable. @@ -128,7 +152,7 @@ HealthIndicatorResult getHealthIndicatorResult( HealthStatus status = HealthStatus.fromCoordinationDiagnosticsStatus(coordinationDiagnosticsResult.status()); HealthIndicatorDetails details = getDetails(coordinationDiagnosticsResult.details(), explain); Collection impacts = status.indicatesHealthProblem() ? UNSTABLE_MASTER_IMPACTS : List.of(); - List diagnosis = status.indicatesHealthProblem() ? getContactSupportUserActions(explain) : List.of(); + List diagnosis = status.indicatesHealthProblem() ? getUnstableMasterDiagnoses(explain) : List.of(); return createIndicator(status, coordinationDiagnosticsResult.summary(), details, impacts, diagnosis); } @@ -215,13 +239,16 @@ private String getNameForNodeId(String nodeId) { } /** - * This method returns the only user action that is relevant when the master is unstable -- contact support. - * @param explain If true, the returned list includes a UserAction to contact support, otherwise an empty list - * @return a single UserAction instructing users to contact support. + * This method returns the relevant user actions when the master is unstable, linking to some troubleshooting docs and suggesting to + * contact support. + * + * @param explain If true, the returned list includes UserActions linking to troubleshooting docs and another to contact support, + * otherwise an empty list. + * @return the relevant user actions when the master is unstable. 
*/ - private static List getContactSupportUserActions(boolean explain) { + private List getUnstableMasterDiagnoses(boolean explain) { if (explain) { - return List.of(CONTACT_SUPPORT); + return List.of(TROUBLESHOOT_DISCOVERY, TROUBLESHOOT_UNSTABLE_CLUSTER, CONTACT_SUPPORT); } else { return List.of(); } diff --git a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 80c969cc1b084..67a9e23f2297f 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -69,6 +69,7 @@ public enum ReferenceDocs { BOOTSTRAP_CHECK_TLS, BOOTSTRAP_CHECK_TOKEN_SSL, BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, + CONTACT_SUPPORT, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index d8b4ed1ff93c9..46e32300e70fd 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -29,5 +29,6 @@ "BOOTSTRAP_CHECK_ROLE_MAPPINGS": "bootstrap-checks-xpack.html#_role_mappings_check", "BOOTSTRAP_CHECK_TLS": "bootstrap-checks-xpack.html#bootstrap-checks-tls", "BOOTSTRAP_CHECK_TOKEN_SSL": "bootstrap-checks-xpack.html#_token_ssl_check", - "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html" + "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html", + "CONTACT_SUPPORT": "troubleshooting.html#troubleshooting-contact-support" } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java index 18385b1d7ad44..77c59fe9e8209 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java @@ -43,9 +43,9 @@ import java.util.Map; import java.util.UUID; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -126,9 +126,14 @@ public void testGetHealthIndicatorResultNotGreenVerboseTrue() throws Exception { assertThat(nodeIdToClusterFormationMap.get(node2.getId()), equalTo(node2ClusterFormation)); assertThat(nodeIdToNodeNameMap.get(node1.getId()), equalTo(node1.getName())); assertThat(nodeIdToNodeNameMap.get(node2.getId()), equalTo(node2.getName())); - List diagnosis = result.diagnosisList(); - assertThat(diagnosis.size(), equalTo(1)); - assertThat(diagnosis.get(0), is(StableMasterHealthIndicatorService.CONTACT_SUPPORT)); + assertThat( + result.diagnosisList(), + containsInAnyOrder( + StableMasterHealthIndicatorService.CONTACT_SUPPORT, + StableMasterHealthIndicatorService.TROUBLESHOOT_DISCOVERY, + StableMasterHealthIndicatorService.TROUBLESHOOT_UNSTABLE_CLUSTER + ) + ); } public void testGetHealthIndicatorResultNotGreenVerboseFalse() throws Exception { From 
eb9629e9a864fbcb07624207c0d4b697939c31c3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Jan 2024 20:27:25 +0000 Subject: [PATCH 56/95] Report current master in PeerFinder (#104396) If we cannot join a cluster because the currently-elected master is unreachable to us, but we can reach enough other master-eligible nodes, then it can be a little hard to determine the problem from the logs: the `ClusterFormationFailureHelper` reports that we have discovered a possible quorum, but this is no help if we cannot contact the master itself. The `PeerFinder` eventually starts to report the connection failures, but it's hard to tell that they're the important ones. This commit adjusts the logging from the `PeerFinder` to include an indication that the connection we're attempting is to a node that was recently believed to be the elected master by one of our peers. --- docs/changelog/104396.yaml | 5 + .../org/elasticsearch/TransportVersions.java | 1 + .../ClusterFormationFailureHelper.java | 25 ++++- .../cluster/coordination/Coordinator.java | 1 + .../elasticsearch/discovery/PeerFinder.java | 32 ++++++- .../ClusterFormationInfoActionTests.java | 4 + .../ClusterFormationFailureHelperTests.java | 93 ++++++++++++++++++- .../CoordinationDiagnosticsServiceTests.java | 1 + .../discovery/PeerFinderTests.java | 40 ++++++++ 9 files changed, 194 insertions(+), 8 deletions(-) create mode 100644 docs/changelog/104396.yaml diff --git a/docs/changelog/104396.yaml b/docs/changelog/104396.yaml new file mode 100644 index 0000000000000..586fdc1b22624 --- /dev/null +++ b/docs/changelog/104396.yaml @@ -0,0 +1,5 @@ +pr: 104396 +summary: Report current master in `PeerFinder` +area: Cluster Coordination +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fc43d47f29471..0951f1b42e8b5 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -184,6 +184,7 @@ static TransportVersion def(int id) { public static final TransportVersion ML_INFERENCE_REQUEST_INPUT_TYPE_ADDED = def(8_572_00_0); public static final TransportVersion ESQL_ENRICH_POLICY_CCQ_MODE = def(8_573_00_0); public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_574_00_0); + public static final TransportVersion PEERFINDER_REPORTS_PEERS_MASTERS = def(8_575_00_0); /* * STOP! READ THIS FIRST!
No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 5134f153a7fbb..c2cd403836593 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState.VoteCollection; @@ -140,6 +141,7 @@ public record ClusterFormationState( VotingConfiguration lastCommittedConfiguration, List resolvedAddresses, List foundPeers, + Set mastersOfPeers, long currentTerm, boolean hasDiscoveredQuorum, StatusInfo statusInfo, @@ -151,6 +153,7 @@ public ClusterFormationState( ClusterState clusterState, List resolvedAddresses, List foundPeers, + Set mastersOfPeers, long currentTerm, ElectionStrategy electionStrategy, StatusInfo statusInfo, @@ -166,6 +169,7 @@ public ClusterFormationState( clusterState.getLastCommittedConfiguration(), resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, calculateHasDiscoveredQuorum( foundPeers, @@ -216,6 +220,9 @@ public ClusterFormationState(StreamInput in) throws IOException { new VotingConfiguration(in), in.readCollectionAsImmutableList(TransportAddress::new), in.readCollectionAsImmutableList(DiscoveryNode::new), + in.getTransportVersion().onOrAfter(TransportVersions.PEERFINDER_REPORTS_PEERS_MASTERS) + ? 
in.readCollectionAsImmutableSet(DiscoveryNode::new) + : Set.of(), in.readLong(), in.readBoolean(), new StatusInfo(in), @@ -250,12 +257,19 @@ private String getCoordinatorDescription() { acceptedTerm ); - final StringBuilder foundPeersDescription = new StringBuilder(); + final StringBuilder foundPeersDescription = new StringBuilder("["); DiscoveryNodes.addCommaSeparatedNodesWithoutAttributes(foundPeers.iterator(), foundPeersDescription); + if (mastersOfPeers.isEmpty()) { + foundPeersDescription.append(']'); + } else { + foundPeersDescription.append("] who claim current master to be ["); + DiscoveryNodes.addCommaSeparatedNodesWithoutAttributes(mastersOfPeers.iterator(), foundPeersDescription); + foundPeersDescription.append(']'); + } final String discoveryStateIgnoringQuorum = String.format( Locale.ROOT, - "have discovered [%s]; %s", + "have discovered %s; %s", foundPeersDescription, discoveryWillContinueDescription ); @@ -291,7 +305,7 @@ private String getCoordinatorDescription() { if (lastCommittedConfiguration.equals(VotingConfiguration.MUST_JOIN_ELECTED_MASTER)) { return String.format( Locale.ROOT, - "master not discovered yet and this node was detached from its previous cluster, have discovered [%s]; %s", + "master not discovered yet and this node was detached from its previous cluster, have discovered %s; %s", foundPeersDescription, discoveryWillContinueDescription ); @@ -310,7 +324,7 @@ private String getCoordinatorDescription() { return String.format( Locale.ROOT, - "master not discovered or elected yet, an election requires %s, %s [%s]; %s", + "master not discovered or elected yet, an election requires %s, %s %s; %s", quorumDescription, haveDiscoveredQuorum, foundPeersDescription, @@ -388,6 +402,9 @@ public void writeTo(StreamOutput out) throws IOException { lastCommittedConfiguration.writeTo(out); out.writeCollection(resolvedAddresses); out.writeCollection(foundPeers); + if (out.getTransportVersion().onOrAfter(TransportVersions.PEERFINDER_REPORTS_PEERS_MASTERS)) { + out.writeCollection(mastersOfPeers); + } out.writeLong(currentTerm); out.writeBoolean(hasDiscoveredQuorum); statusInfo.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 3da890b37ade8..927ca1152a658 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -334,6 +334,7 @@ public ClusterFormationState getClusterFormationState() { getLastAcceptedState(), // doesn't care about blocks or the current master node so no need for getStateForMasterService peerFinder.getLastResolvedAddresses(), Stream.concat(Stream.of(getLocalNode()), StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false)).toList(), + peerFinder.getMastersOfPeers(), getCurrentTerm(), electionStrategy, nodeHealthService.getHealth(), diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index ec315f5200978..5289ac57e10ca 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -40,6 +40,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; +import java.util.stream.Collectors; import static java.util.Collections.emptyList; import static org.elasticsearch.core.Strings.format; @@ 
-352,10 +353,17 @@ protected void startProbe(TransportAddress transportAddress) { } } + public Set getMastersOfPeers() { + synchronized (mutex) { + return peersByAddress.values().stream().flatMap(p -> p.lastKnownMasterNode.stream()).collect(Collectors.toSet()); + } + } + private class Peer { private final TransportAddress transportAddress; private final SetOnce probeConnectionResult = new SetOnce<>(); private volatile boolean peersRequestInFlight; + private Optional lastKnownMasterNode = Optional.empty(); Peer(TransportAddress transportAddress) { this.transportAddress = transportAddress; @@ -439,9 +447,20 @@ public void onResponse(ProbeConnectionResult connectResult) { @Override public void onFailure(Exception e) { if (verboseFailureLogging) { + + final String believedMasterBy; + synchronized (mutex) { + believedMasterBy = peersByAddress.values() + .stream() + .filter(p -> p.lastKnownMasterNode.map(DiscoveryNode::getAddress).equals(Optional.of(transportAddress))) + .findFirst() + .map(p -> " [current master according to " + p.getDiscoveryNode().descriptionWithoutAttributes() + "]") + .orElse(""); + } + if (logger.isDebugEnabled()) { // log message at level WARN, but since DEBUG logging is enabled we include the full stack trace - logger.warn(() -> format("%s discovery result", Peer.this), e); + logger.warn(() -> format("%s%s discovery result", Peer.this, believedMasterBy), e); } else { final StringBuilder messageBuilder = new StringBuilder(); Throwable cause = e; @@ -452,7 +471,7 @@ public void onFailure(Exception e) { final String message = messageBuilder.length() < 1024 ? messageBuilder.toString() : (messageBuilder.substring(0, 1023) + "..."); - logger.warn("{} discovery result{}", Peer.this, message); + logger.warn("{}{} discovery result{}", Peer.this, believedMasterBy, message); } } else { logger.debug(() -> format("%s discovery result", Peer.this), e); @@ -504,6 +523,7 @@ public void handleResponse(PeersResponse response) { return; } + lastKnownMasterNode = response.getMasterNode(); response.getMasterNode().ifPresent(node -> startProbe(node.getAddress())); for (DiscoveryNode node : response.getKnownPeers()) { startProbe(node.getAddress()); @@ -545,7 +565,13 @@ Releasable getConnectionReference() { @Override public String toString() { - return "address [" + transportAddress + "], node [" + getDiscoveryNode() + "], requesting [" + peersRequestInFlight + "]"; + return "address [" + + transportAddress + + "], node [" + + Optional.ofNullable(probeConnectionResult.get()) + .map(result -> result.getDiscoveryNode().descriptionWithoutAttributes()) + .orElse("unknown") + + (peersRequestInFlight ? 
" [request in flight]" : ""); } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java index 38c811d367560..9c7fa266a0762 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/coordination/ClusterFormationInfoActionTests.java @@ -69,6 +69,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum(), clusterFormationState.statusInfo(), @@ -88,6 +89,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum(), clusterFormationState.statusInfo(), @@ -107,6 +109,7 @@ private ClusterFormationInfoAction.Response mutateResponse(ClusterFormationInfoA clusterFormationState.lastCommittedConfiguration(), clusterFormationState.resolvedAddresses(), clusterFormationState.foundPeers(), + clusterFormationState.mastersOfPeers(), clusterFormationState.currentTerm(), clusterFormationState.hasDiscoveredQuorum() == false, clusterFormationState.statusInfo(), @@ -148,6 +151,7 @@ private ClusterFormationFailureHelper.ClusterFormationState getClusterFormationS new CoordinationMetadata.VotingConfiguration(Collections.emptySet()), Collections.emptyList(), Collections.emptyList(), + Collections.emptySet(), randomLong(), randomBoolean(), new StatusInfo(randomFrom(StatusInfo.Status.HEALTHY, StatusInfo.Status.UNHEALTHY), randomAlphaOfLength(20)), diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 395dde29597d3..ae557b1b418da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -42,6 +42,7 @@ import static java.util.Collections.emptyList; import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; @@ -91,6 +92,7 @@ public void testScheduling() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -189,6 +191,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -207,6 +210,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 16L, electionStrategy, new 
StatusInfo(HEALTHY, "healthy-info"), @@ -226,6 +230,7 @@ public void testDescriptionOnMasterIneligibleNodes() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 17L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -253,6 +258,7 @@ public void testDescriptionOnUnhealthyNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(UNHEALTHY, "unhealthy-info"), @@ -273,6 +279,7 @@ public void testDescriptionOnUnhealthyNodes() { clusterState, emptyList(), emptyList(), + emptySet(), 15L, electionStrategy, new StatusInfo(UNHEALTHY, "unhealthy-info"), @@ -296,6 +303,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 1L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -317,6 +325,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 2L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -340,6 +349,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 3L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -362,6 +372,7 @@ public void testDescriptionBeforeBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 4L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -414,6 +425,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -435,6 +447,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -458,6 +471,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -481,6 +495,7 @@ public void testDescriptionAfterDetachCluster() { clusterState, emptyList(), singletonList(yetAnotherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -510,6 +525,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -531,6 +547,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, singletonList(otherAddress), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -554,6 +571,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), singletonList(otherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -577,6 +595,7 @@ public void testDescriptionAfterBootstrapping() { clusterState, emptyList(), singletonList(yetAnotherNode), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -593,12 +612,39 @@ public void testDescriptionAfterBootstrapping() { ) ); + final DiscoveryNode recentMaster = makeDiscoveryNode("recentMaster"); + assertThat( + new ClusterFormationState( + Settings.EMPTY, + clusterState, + emptyList(), + singletonList(yetAnotherNode), + singleton(recentMaster), + 0L, + electionStrategy, + new StatusInfo(HEALTHY, "healthy-info"), + emptyList() + ).getDescription(), + is( + "master not discovered or elected yet, an election requires a node with id [otherNode], " + + "have only discovered non-quorum [" + + 
noAttr(yetAnotherNode) + + "] who claim current master to be [" + + noAttr(recentMaster) + + "]; " + + "discovery will continue using [] from hosts providers and [" + + noAttr(localNode) + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0" + ) + ); + assertThat( new ClusterFormationState( Settings.EMPTY, state(localNode, "n1", "n2"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -619,6 +665,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -639,6 +686,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -659,6 +707,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -679,6 +728,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4", "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -699,6 +749,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -719,6 +770,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, "n1", "n2", "n3", BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -739,6 +791,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n1" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -759,6 +812,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n2" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -779,6 +833,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n2", "n3" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -799,6 +854,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, new String[] { "n1" }, new String[] { "n2", "n3", "n4" }), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -841,6 +897,7 @@ public void testDescriptionAfterBootstrapping() { stateWithOtherNodes, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -875,6 +932,7 @@ public void testDescriptionAfterBootstrapping() { state(localNode, GatewayMetaState.STALE_STATE_CONFIG_NODE_ID), emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -910,6 +968,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -924,6 +983,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), 
emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -970,6 +1030,7 @@ public void testJoinStatusReporting() { clusterState, emptyList(), emptyList(), + emptySet(), 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -987,6 +1048,10 @@ public void testClusterFormationStateSerialization() { DiscoveryNodeUtils.create(UUID.randomUUID().toString()), DiscoveryNodeUtils.create(UUID.randomUUID().toString()) ); + Set mastersOfPeers = Set.of( + DiscoveryNodeUtils.create(UUID.randomUUID().toString()), + DiscoveryNodeUtils.create(UUID.randomUUID().toString()) + ); List joinStatuses = List.of( new JoinStatus( DiscoveryNodeUtils.create(UUID.randomUUID().toString()), @@ -1001,6 +1066,7 @@ public void testClusterFormationStateSerialization() { state(localNode, new String[] { "n1" }, new String[] { "n2", "n3", "n4" }), resolvedAddresses, foundPeers, + mastersOfPeers, 0L, electionStrategy, new StatusInfo(HEALTHY, "healthy-info"), @@ -1035,6 +1101,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState final DiscoveryNode localNode = originalClusterFormationState.localNode(); List resolvedAddresses = originalClusterFormationState.resolvedAddresses(); List foundPeers = originalClusterFormationState.foundPeers(); + Set mastersOfPeers = originalClusterFormationState.mastersOfPeers(); long currentTerm = originalClusterFormationState.currentTerm(); StatusInfo statusInfo = originalClusterFormationState.statusInfo(); List joinStatuses = originalClusterFormationState.inFlightJoinStatuses(); @@ -1043,13 +1110,14 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState originalClusterFormationState.lastAcceptedConfiguration(), originalClusterFormationState.lastCommittedConfiguration() ); - switch (randomIntBetween(1, 5)) { + switch (randomIntBetween(1, 6)) { case 1 -> { return new ClusterFormationState( settings, clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm + 1, electionStrategy, statusInfo, @@ -1064,6 +1132,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, newFoundPeers, + mastersOfPeers, currentTerm, electionStrategy, statusInfo, @@ -1085,6 +1154,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, electionStrategy, statusInfo, @@ -1098,6 +1168,7 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, currentTerm, electionStrategy, newStatusInfo, @@ -1110,6 +1181,26 @@ private ClusterFormationState mutateClusterFormationState(ClusterFormationState clusterState, resolvedAddresses, foundPeers, + mastersOfPeers, + currentTerm, + electionStrategy, + statusInfo, + joinStatuses + ); + } + case 6 -> { + List newMastersOfPeers = new ArrayList<>(mastersOfPeers); + if (mastersOfPeers.isEmpty() || randomBoolean()) { + newMastersOfPeers.add(DiscoveryNodeUtils.create(UUID.randomUUID().toString())); + } else { + newMastersOfPeers.remove(0); + } + return new ClusterFormationState( + settings, + clusterState, + resolvedAddresses, + foundPeers, + Set.copyOf(newMastersOfPeers), currentTerm, electionStrategy, statusInfo, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java 
index 0d93dfb3d7f62..2ad0f18de277f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsServiceTests.java @@ -993,6 +993,7 @@ private ClusterFormationFailureHelper.ClusterFormationState getClusterFormationS hasDiscoveredAllNodes ? allMasterEligibleNodes : randomSubsetOf(randomInt(allMasterEligibleNodes.size() - 1), allMasterEligibleNodes), + Collections.emptySet(), randomLong(), hasDiscoveredQuorum, new StatusInfo(randomFrom(StatusInfo.Status.HEALTHY, StatusInfo.Status.UNHEALTHY), randomAlphaOfLength(20)), diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index ce5841d066d88..209261e8dce70 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -883,6 +883,46 @@ public boolean innerMatch(LogEvent event) { } } + @TestLogging(reason = "testing logging at WARN level", value = "org.elasticsearch.discovery:WARN") + public void testEventuallyLogsIfReturnedMasterIsUnreachable() { + final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + final long endTime = deterministicTaskQueue.getCurrentTimeMillis() + VERBOSITY_INCREASE_TIMEOUT_SETTING.get(Settings.EMPTY).millis() + + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(Settings.EMPTY).millis(); + + runAllRunnableTasks(); + + assertFoundPeers(otherNode); + final DiscoveryNode unreachableMaster = newDiscoveryNode("unreachable-master"); + transportAddressConnector.unreachableAddresses.add(unreachableMaster.getAddress()); + + MockLogAppender.assertThatLogger(() -> { + while (deterministicTaskQueue.getCurrentTimeMillis() <= endTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + respondToRequests(node -> { + assertThat(node, is(otherNode)); + return new PeersResponse(Optional.of(unreachableMaster), emptyList(), randomNonNegativeLong()); + }); + } + }, + PeerFinder.class, + new MockLogAppender.SeenEventExpectation( + "discovery result", + "org.elasticsearch.discovery.PeerFinder", + Level.WARN, + "address [" + unreachableMaster.getAddress() + "]* [current master according to *node-from-hosts-list*" + ) + ); + + assertFoundPeers(otherNode); + assertThat(peerFinder.discoveredMasterNode, nullValue()); + assertFalse(peerFinder.discoveredMasterTerm.isPresent()); + } + public void testReconnectsToDisconnectedNodes() { final DiscoveryNode otherNode = newDiscoveryNode("original-node"); providedAddresses.add(otherNode.getAddress()); From c6b0d369936e01dcd9b8c1fb6aaf97b9ccccfc8b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Jan 2024 21:54:02 +0100 Subject: [PATCH 57/95] Remove redundant CancelTasksResponse (#104429) This class is the same as ListTasksResponse, as are the tests for it. => remove it and replace all uses with ListTasksResponse.
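Since the removed subclass added nothing over its parent, the call sites migrate by type substitution alone, as the diffs below show. A sketch of the before/after shape against the internal client API (a hypothetical helper written for illustration, not code taken from this change):

import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.internal.ClusterAdminClient;
import org.elasticsearch.tasks.TaskId;

// Hypothetical helper: the action and request builder are untouched; only
// the declared response type changes from CancelTasksResponse to
// ListTasksResponse, which already carries the tasks, task failures and
// node failures that callers inspect.
class CancelTasksMigrationSketch {
    static void cancelAndCheck(ClusterAdminClient clusterAdmin, TaskId taskId) {
        // before: ActionFuture<CancelTasksResponse> cancelFuture = ...
        ActionFuture<ListTasksResponse> cancelFuture = clusterAdmin.prepareCancelTasks()
            .setTargetTaskId(taskId)
            .waitForCompletion(true)
            .execute();
        ListTasksResponse response = cancelFuture.actionGet();
        // the failure accessors are inherited from ListTasksResponse
        assert response.getTaskFailures().isEmpty();
        assert response.getNodeFailures().isEmpty();
    }
}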
--- .../node/tasks/CancellableTasksIT.java | 13 +- .../admin/cluster/node/tasks/TasksIT.java | 3 +- .../search/ccs/CrossClusterIT.java | 4 +- .../node/tasks/cancel/CancelTasksAction.java | 5 +- .../cancel/CancelTasksRequestBuilder.java | 3 +- .../tasks/cancel/CancelTasksResponse.java | 47 ------- .../cancel/TransportCancelTasksAction.java | 9 +- .../client/internal/ClusterAdminClient.java | 5 +- .../internal/support/AbstractClient.java | 5 +- .../PersistentTasksNodeService.java | 6 +- .../persistent/PersistentTasksService.java | 4 +- .../node/tasks/CancellableTasksTests.java | 9 +- .../node/tasks/TransportTasksActionTests.java | 3 +- .../PersistentTasksNodeServiceTests.java | 11 +- .../tasks/CancelTasksResponseTests.java | 123 ------------------ .../AbstractSearchCancellationTestCase.java | 3 +- .../search/CrossClusterAsyncSearchIT.java | 3 +- .../xpack/search/AsyncSearchTask.java | 4 +- .../AbstractEqlBlockingIntegTestCase.java | 3 +- .../TransportDeleteTrainedModelAction.java | 8 +- .../ml/dataframe/steps/ReindexingStep.java | 6 +- ...ransportDeleteTrainedModelActionTests.java | 21 ++- .../AbstractSqlBlockingIntegTestCase.java | 3 +- 23 files changed, 61 insertions(+), 240 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java delete mode 100644 server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index b20f658a01510..cb3eee3c60c23 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.GroupedActionListener; @@ -166,7 +165,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { ActionFuture rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); Set pendingRequests = allowPartialRequest(rootRequest); TaskId rootTaskId = getRootTaskId(rootRequest); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); @@ -215,10 +214,10 @@ public void testCancelTaskMultipleTimes() throws Exception { ActionFuture mainTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); allowPartialRequest(rootRequest); - CancelTasksResponse resp = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); + ListTasksResponse resp = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); assertThat(resp.getTaskFailures(), empty()); assertThat(resp.getNodeFailures(), empty()); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = 
clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(true) .execute(); @@ -226,7 +225,7 @@ public void testCancelTaskMultipleTimes() throws Exception { allowEntireRequest(rootRequest); assertThat(cancelFuture.actionGet().getTaskFailures(), empty()); waitForRootTask(mainTaskFuture, false); - CancelTasksResponse cancelError = clusterAdmin().prepareCancelTasks() + ListTasksResponse cancelError = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(randomBoolean()) .get(); @@ -245,7 +244,7 @@ public void testDoNotWaitForCompletion() throws Exception { allowPartialRequest(rootRequest); } boolean waitForCompletion = randomBoolean(); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(taskId) .waitForCompletion(waitForCompletion) .execute(); @@ -311,7 +310,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { client().execute(TransportTestAction.ACTION, rootRequest); Set pendingRequests = allowPartialRequest(rootRequest); TaskId rootTaskId = getRootTaskId(rootRequest); - ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() + ActionFuture cancelFuture = clusterAdmin().prepareCancelTasks() .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 21497b2e6fcfb..3f0436c685781 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -503,7 +502,7 @@ public void testTasksCancellation() throws Exception { ); logger.info("--> cancelling the main test task"); - CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); + ListTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); assertEquals(1, cancelTasksResponse.getTasks().size()); expectThrows(TaskCancelledException.class, future); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java index cf8d81f406f91..eedda05dcb102 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterIT.java @@ -11,7 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import 
org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -268,7 +268,7 @@ public void testCancel() throws Exception { final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.taskId()); cancelRequest.setWaitForCompletion(randomBoolean()); - final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); + final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { final Iterable transportServices = cluster("cluster_a").getInstances(TransportService.class); for (TransportService transportService : transportServices) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java index 86d0206d62b65..3cba83305c0fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksAction.java @@ -9,16 +9,17 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; /** * ActionType for cancelling running tasks */ -public class CancelTasksAction extends ActionType { +public class CancelTasksAction extends ActionType { public static final CancelTasksAction INSTANCE = new CancelTasksAction(); public static final String NAME = "cluster:admin/tasks/cancel"; private CancelTasksAction() { - super(NAME, CancelTasksResponse::new); + super(NAME, ListTasksResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java index 45fc4e352a4ba..5fdd50e0c9e66 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestBuilder.java @@ -8,13 +8,14 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.tasks.TasksRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; /** * Builder for the request to cancel tasks running on the specified nodes */ -public class CancelTasksRequestBuilder extends TasksRequestBuilder { +public class CancelTasksRequestBuilder extends TasksRequestBuilder { public CancelTasksRequestBuilder(ElasticsearchClient client) { super(client, CancelTasksAction.INSTANCE, new CancelTasksRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java deleted file mode 100644 index a53ed8dacc36c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.node.tasks.cancel; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.tasks.TaskInfo; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.util.List; - -/** - * Returns the list of tasks that were cancelled - */ -public class CancelTasksResponse extends ListTasksResponse { - - private static final ConstructingObjectParser PARSER = setupParser( - "cancel_tasks_response", - CancelTasksResponse::new - ); - - public CancelTasksResponse(StreamInput in) throws IOException { - super(in); - } - - public CancelTasksResponse( - List tasks, - List taskFailures, - List nodeFailures - ) { - super(tasks, taskFailures, nodeFailures); - } - - public static CancelTasksResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index aa7c19cf35514..1f3271be79797 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; @@ -31,7 +32,7 @@ * For a task to be cancellable it has to return an instance of * {@link CancellableTask} from {@link TransportRequest#createTask} */ -public class TransportCancelTasksAction extends TransportTasksAction { +public class TransportCancelTasksAction extends TransportTasksAction { @Inject public TransportCancelTasksAction(ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { @@ -41,7 +42,7 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic transportService, actionFilters, CancelTasksRequest::new, - CancelTasksResponse::new, + ListTasksResponse::new, TaskInfo::from, // Cancellation is usually lightweight, and runs on the transport thread if the task didn't even start yet, but some // implementations of CancellableTask#onCancelled() are nontrivial so we use GENERIC here. TODO could it be SAME? 
@@ -50,13 +51,13 @@ public TransportCancelTasksAction(ClusterService clusterService, TransportServic } @Override - protected CancelTasksResponse newResponse( + protected ListTasksResponse newResponse( CancelTasksRequest request, List tasks, List taskOperationFailures, List failedNodeExceptions ) { - return new CancelTasksResponse(tasks, taskOperationFailures, failedNodeExceptions); + return new ListTasksResponse(tasks, taskOperationFailures, failedNodeExceptions); } protected List processTasks(CancelTasksRequest request) { diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index a8365a62c9e58..8e9977696bc18 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; @@ -293,7 +292,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { * @param request The nodes tasks request * @return The result future */ - ActionFuture cancelTasks(CancelTasksRequest request); + ActionFuture cancelTasks(CancelTasksRequest request); /** * Cancel active tasks @@ -301,7 +300,7 @@ public interface ClusterAdminClient extends ElasticsearchClient { * @param request The nodes tasks request * @param listener A listener to be notified with a result */ - void cancelTasks(CancelTasksRequest request, ActionListener listener); + void cancelTasks(CancelTasksRequest request, ActionListener listener); /** * Cancel active tasks diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index f38cd7551dad7..c873ff884b642 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -35,7 +35,6 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequestBuilder; @@ -803,12 +802,12 @@ public GetTaskRequestBuilder prepareGetTask(TaskId taskId) { } @Override - public ActionFuture cancelTasks(CancelTasksRequest request) { + public ActionFuture cancelTasks(CancelTasksRequest request) { return execute(CancelTasksAction.INSTANCE, request); } @Override - public void cancelTasks(CancelTasksRequest request, ActionListener listener) { + public void 
cancelTasks(CancelTasksRequest request, ActionListener listener) { execute(CancelTasksAction.INSTANCE, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index 32188d55e418a..63c97685c913e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; @@ -300,9 +300,9 @@ private void cancelTask(Long allocationId) { if (task.markAsCancelled()) { // Cancel the local task using the task manager String reason = "task has been removed, cancelling locally"; - persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener() { + persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<>() { @Override - public void onResponse(CancelTasksResponse cancelTasksResponse) { + public void onResponse(ListTasksResponse cancelTasksResponse) { logger.trace( "Persistent task [{}] with id [{}] and allocation id [{}] was cancelled", task.getAction(), diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 869a93110d257..227569341919a 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; @@ -90,7 +90,7 @@ public void sendCompletionRequest( /** * Cancels a locally running task using the Task Manager API */ - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { CancelTasksRequest request = new CancelTasksRequest(); request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId)); request.setReason(reason); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index 345f85470a056..adefd71f93590 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionListener; import 
org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionTestUtils; @@ -289,7 +288,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request ); @@ -368,7 +367,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, request ); @@ -487,7 +486,7 @@ public void onFailure(Exception e) { request.setReason("Testing Cancellation"); request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); + ListTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); // This node still thinks that's part of the cluster, so cancelling should look successful assertThat(response.getTasks().size(), lessThanOrEqualTo(1)); @@ -544,7 +543,7 @@ public void testNonExistingTaskCancellation() throws Exception { randomSubsetOf(randomIntBetween(1, testNodes.length - 1), testNodes).stream().map(TestNode::getNodeId).toArray(String[]::new) ); // And send the cancellation request to a random node - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, request ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 86ccd9807cf9f..7168b2c1edcdd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import 
org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; @@ -521,7 +520,7 @@ public void testCancellingTasksThatDontSupportCancellation() throws Exception { request.setNodes(testNodes[0].getNodeId()); request.setReason("Testing Cancellation"); request.setActions(actionName); - CancelTasksResponse response = ActionTestUtils.executeBlocking( + ListTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request ); diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 7c85cba4c34eb..ee35491a74d00 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.persistent; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -253,12 +253,12 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { public void testTaskCancellation() { AtomicLong capturedTaskId = new AtomicLong(); - AtomicReference> capturedListener = new AtomicReference<>(); + AtomicReference> capturedListener = new AtomicReference<>(); Client client = mock(Client.class); when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { capturedTaskId.set(taskId); capturedListener.set(listener); } @@ -327,8 +327,7 @@ public void sendCompletionRequest( // That should trigger cancellation request assertThat(capturedTaskId.get(), equalTo(localId)); // Notify successful cancellation - capturedListener.get() - .onResponse(new CancelTasksResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); + capturedListener.get().onResponse(new ListTasksResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyList())); // finish or fail task if (randomBoolean()) { @@ -349,7 +348,7 @@ public void testTaskLocalAbort() { when(client.settings()).thenReturn(Settings.EMPTY); PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) { @Override - void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { + void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { fail("Shouldn't be called during local abort"); } diff --git a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java deleted file mode 100644 index 793c1f60c38e6..0000000000000 --- a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.tasks; - -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.ChunkedToXContent; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; -import java.net.ConnectException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.function.Predicate; -import java.util.function.Supplier; - -import static org.hamcrest.Matchers.equalTo; - -public class CancelTasksResponseTests extends AbstractXContentTestCase { - - // CancelTasksResponse doesn't directly implement ToXContent because it has multiple XContent representations, so we must wrap here - public record CancelTasksResponseWrapper(CancelTasksResponse in) implements ToXContentObject { - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - return ChunkedToXContent.wrapAsToXContent(in.groupedByNone()).toXContent(builder, params); - } - } - - @Override - protected CancelTasksResponseWrapper createTestInstance() { - List randomTasks = randomTasks(); - return new CancelTasksResponseWrapper(new CancelTasksResponse(randomTasks, Collections.emptyList(), Collections.emptyList())); - } - - private static List randomTasks() { - List randomTasks = new ArrayList<>(); - for (int i = 0; i < randomInt(10); i++) { - randomTasks.add(TaskInfoTests.randomTaskInfo()); - } - return randomTasks; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - // status and headers hold arbitrary content, we can't inject random fields in them - return field -> field.endsWith("status") || field.endsWith("headers"); - } - - @Override - protected void assertEqualInstances(CancelTasksResponseWrapper expectedInstanceWrapper, CancelTasksResponseWrapper newInstanceWrapper) { - final var expectedInstance = expectedInstanceWrapper.in(); - final var newInstance = newInstanceWrapper.in(); - assertNotSame(expectedInstance, newInstance); - assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); - ListTasksResponseTests.assertOnNodeFailures(newInstance.getNodeFailures(), expectedInstance.getNodeFailures()); - ListTasksResponseTests.assertOnTaskFailures(newInstance.getTaskFailures(), expectedInstance.getTaskFailures()); - } - - @Override - protected CancelTasksResponseWrapper doParseInstance(XContentParser parser) { - return new CancelTasksResponseWrapper(CancelTasksResponse.fromXContent(parser)); - } - - @Override - protected boolean supportsUnknownFields() { - return true; - } - - /** - * Test parsing {@link ListTasksResponse} with inner failures as they don't support asserting on xcontent equivalence, given that - * exceptions are not parsed back as the same original class. 
We run the usual {@link AbstractXContentTestCase#testFromXContent()} - * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. - */ - public void testFromXContentWithFailures() throws IOException { - Supplier instanceSupplier = CancelTasksResponseTests::createTestInstanceWithFailures; - // with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, - // but that does not bother our assertions, as we only want to test that we don't break. - boolean supportsUnknownFields = true; - // exceptions are not of the same type whenever parsed back - boolean assertToXContentEquivalence = false; - AbstractXContentTestCase.testFromXContent( - NUMBER_OF_TEST_RUNS, - instanceSupplier, - supportsUnknownFields, - Strings.EMPTY_ARRAY, - getRandomFieldsExcludeFilter(), - this::createParser, - this::doParseInstance, - this::assertEqualInstances, - assertToXContentEquivalence, - ToXContent.EMPTY_PARAMS - ); - } - - private static CancelTasksResponseWrapper createTestInstanceWithFailures() { - int numNodeFailures = randomIntBetween(0, 3); - List nodeFailures = new ArrayList<>(numNodeFailures); - for (int i = 0; i < numNodeFailures; i++) { - nodeFailures.add(new FailedNodeException(randomAlphaOfLength(5), "error message", new ConnectException())); - } - int numTaskFailures = randomIntBetween(0, 3); - List taskFailures = new ArrayList<>(numTaskFailures); - for (int i = 0; i < numTaskFailures; i++) { - taskFailures.add(new TaskOperationFailure(randomAlphaOfLength(5), randomLong(), new IllegalStateException())); - } - return new CancelTasksResponseWrapper(new CancelTasksResponse(randomTasks(), taskFailures, nodeFailures)); - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java index 9d151e690b071..5dc707e94bdd7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSearchCancellationTestCase.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -117,7 +116,7 @@ protected void cancelSearch(String action) { TaskInfo searchTask = listTasksResponse.getTasks().get(0); logger.info("Cancelling search"); - CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setTargetTaskId(searchTask.taskId()).get(); + ListTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setTargetTaskId(searchTask.taskId()).get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); assertThat(cancelTasksResponse.getTasks().get(0).taskId(), equalTo(searchTask.taskId())); } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index bb3dc5b866b54..3605d6365f867 100644 --- 
a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -11,7 +11,6 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -1318,7 +1317,7 @@ public void testCancelViaTasksAPI() throws Exception { SearchListenerPlugin.waitSearchStarted(); - ActionFuture cancelFuture; + ActionFuture cancelFuture; try { ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() .cluster() diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index c167d74eb78d2..04b0b11ad38d4 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -13,7 +13,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.search.CCSSingleCoordinatorSearchProgressListener; import org.elasticsearch.action.search.SearchProgressActionListener; import org.elasticsearch.action.search.SearchRequest; @@ -155,7 +155,7 @@ public void cancelTask(Runnable runnable, String reason) { CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(searchId.getTaskId()).setReason(reason); client.admin().cluster().cancelTasks(req, new ActionListener<>() { @Override - public void onResponse(CancelTasksResponse cancelTasksResponse) { + public void onResponse(ListTasksResponse cancelTasksResponse) { runnable.run(); } diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java index c0a286cc5c464..414705aff0b79 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import org.elasticsearch.action.support.ActionFilter; @@ -258,7 +257,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); 
assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); + ListTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).action(), equalTo(action)); logger.trace("Task is cancelled " + taskId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 8ce41262a1e1d..bcf3c1f58cfa9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.AcknowledgedTransportMasterNodeAction; @@ -115,7 +115,7 @@ protected void masterOperation( } // package-private for testing - static void cancelDownloadTask(Client client, String modelId, ActionListener listener, TimeValue timeout) { + static void cancelDownloadTask(Client client, String modelId, ActionListener listener, TimeValue timeout) { logger.debug(() -> format("[%s] Checking if download task exists and cancelling it", modelId)); OriginSettingClient mlClient = new OriginSettingClient(client, ML_ORIGIN); @@ -283,11 +283,11 @@ private static void executeTaskCancellation( Client client, String modelId, TaskInfo taskInfo, - ActionListener listener, + ActionListener listener, TimeValue timeout ) { if (taskInfo != null) { - ActionListener cancelListener = ActionListener.wrap(listener::onResponse, e -> { + ActionListener cancelListener = ActionListener.wrap(listener::onResponse, e -> { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof ResourceNotFoundException) { logger.debug(() -> format("[%s] Task no longer exists when attempting to cancel it", modelId)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index 1ca78df1fad3d..0ccdd1eb64601 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -12,8 +12,8 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.get.GetIndexAction; import 
org.elasticsearch.action.admin.indices.get.GetIndexRequest; @@ -278,7 +278,7 @@ public void cancel(String reason, TimeValue timeout) { // We need to cancel the reindexing task within context with ML origin as we started the task // from the same context - CancelTasksResponse cancelReindexResponse = cancelTaskWithinMlOriginContext(cancelReindex); + ListTasksResponse cancelReindexResponse = cancelTaskWithinMlOriginContext(cancelReindex); Throwable firstError = null; if (cancelReindexResponse.getNodeFailures().isEmpty() == false) { @@ -296,7 +296,7 @@ public void cancel(String reason, TimeValue timeout) { } } - private CancelTasksResponse cancelTaskWithinMlOriginContext(CancelTasksRequest cancelTasksRequest) { + private ListTasksResponse cancelTaskWithinMlOriginContext(CancelTasksRequest cancelTasksRequest) { final ThreadContext threadContext = client.threadPool().getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashWithOrigin(ML_ORIGIN)) { return client.admin().cluster().cancelTasks(cancelTasksRequest).actionGet(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java index 4f1a99f634a0a..feb35195e3e38 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelActionTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.elasticsearch.action.support.PlainActionFuture; @@ -57,7 +56,7 @@ public void tearDownThreadPool() { public void testCancelDownloadTaskCallsListenerWithNullWhenNoTasksExist() { var client = mockClientWithTasksResponse(Collections.emptyList(), threadPool); - var listener = new PlainActionFuture(); + var listener = new PlainActionFuture(); cancelDownloadTask(client, "modelId", listener, TIMEOUT); @@ -70,13 +69,13 @@ public void testCancelDownloadTaskCallsOnFailureWithErrorWhenCancellingFailsWith doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onFailure(new Exception("cancel error")); return Void.TYPE; }).when(client).execute(same(CancelTasksAction.INSTANCE), any(), any()); - var listener = new PlainActionFuture(); + var listener = new PlainActionFuture(); cancelDownloadTask(client, "modelId", listener, TIMEOUT); @@ -91,13 +90,13 @@ public void testCancelDownloadTaskCallsOnResponseNullWhenTheTaskNoLongerExistsWh doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onFailure(new ResourceNotFoundException("task no longer there")); return Void.TYPE; }).when(client).execute(same(CancelTasksAction.INSTANCE), 
any(), any()); - var listener = new PlainActionFuture(); + var listener = new PlainActionFuture(); cancelDownloadTask(client, "modelId", listener, TIMEOUT); @@ -115,7 +114,7 @@ public void testCancelDownloadTasksCallsGetsUnableToRetrieveTaskInfoError() { return Void.TYPE; }).when(client).execute(same(TransportListTasksAction.TYPE), any(), any()); - var listener = new PlainActionFuture(); + var listener = new PlainActionFuture(); cancelDownloadTask(client, "modelId", listener, TIMEOUT); @@ -127,10 +126,10 @@ public void testCancelDownloadTasksCallsGetsUnableToRetrieveTaskInfoError() { public void testCancelDownloadTaskCallsOnResponseWithTheCancelResponseWhenATaskExists() { var client = mockClientWithTasksResponse(getTaskInfoListOfOne(), threadPool); - var cancelResponse = mock(CancelTasksResponse.class); + var cancelResponse = mock(ListTasksResponse.class); mockCancelTasksResponse(client, cancelResponse); - var listener = new PlainActionFuture(); + var listener = new PlainActionFuture(); cancelDownloadTask(client, "modelId", listener, TIMEOUT); @@ -142,12 +141,12 @@ private static void mockCancelTask(Client client) { when(cluster.prepareCancelTasks()).thenReturn(new CancelTasksRequestBuilder(client)); } - private static void mockCancelTasksResponse(Client client, CancelTasksResponse response) { + private static void mockCancelTasksResponse(Client client, ListTasksResponse response) { mockCancelTask(client); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; listener.onResponse(response); return Void.TYPE; diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java index f667ae4b80d03..7ad54901e2d06 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction; import org.elasticsearch.action.support.ActionFilter; @@ -271,7 +270,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); + ListTasksResponse response = clusterAdmin().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).action(), equalTo(action)); logger.trace("Task is cancelled " + taskId); From f1e25fe723aa2b8d306812ad203013c60912c736 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 16 Jan 2024 22:57:18 +0100 Subject: [PATCH 58/95] Remove unused FieldUsageShardRequest (#104434) It's in the title, this isn't used. 
--- .../indices/stats/FieldUsageShardRequest.java | 61 ------------------- 1 file changed, 61 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java deleted file mode 100644 index 1c3f9672f712c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.stats; - -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.tasks.CancellableTask; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskId; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - -public class FieldUsageShardRequest extends BroadcastShardRequest { - - private final String[] fields; - - FieldUsageShardRequest(ShardId shardId, FieldUsageStatsRequest request) { - super(shardId, request); - this.fields = request.fields(); - } - - FieldUsageShardRequest(StreamInput in) throws IOException { - super(in); - this.fields = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(fields); - } - - @Override - public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "", parentTaskId, headers) { - @Override - public String getDescription() { - return FieldUsageShardRequest.this.getDescription(); - } - }; - } - - @Override - public String getDescription() { - return "get field usage for shard: [" + shardId() + "], fields: " + Arrays.toString(fields); - } - - public String[] fields() { - return fields; - } -} From c8fc737846e927f3caa134df395f0f9d71af66f4 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 16 Jan 2024 14:35:22 -0800 Subject: [PATCH 59/95] AwaitsFix #104436 --- .../repositories/s3/S3BlobContainerRetriesTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 34e14dc718818..b090fb3d34814 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -509,6 +509,7 @@ public void testWriteLargeBlobStreaming() throws Exception { assertEquals(blobSize, bytesReceived.get()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104436") public void testReadRetriesAfterMeaningfulProgress() throws Exception { final int maxRetries = between(0, 5); final int 
bufferSizeBytes = scaledRandomIntBetween( From 32ace953c8550db81febf7513de4d11956f6ef38 Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Wed, 17 Jan 2024 09:14:13 +0800 Subject: [PATCH 60/95] x-pack/plugin/apm-data: fix `@custom` component templates (#104182) Use the data stream type's `@custom` component template (`logs@custom`, `metrics@custom`, `traces@custom`) instead of `apm@custom`. This is an enhancement over what Fleet sets up; it is an additive improvement in the direction of https://github.com/elastic/elasticsearch/issues/97664. The rollup data streams' `@custom` component template names now include the duration suffix (for example `metrics-apm.transaction.10m@custom`), like what Fleet sets up. Add a YAML REST test, and a unit test ensuring consistency across the index templates. --- docs/changelog/104182.yaml | 5 ++ .../logs-apm.app@template.yaml | 4 +- .../logs-apm.error@template.yaml | 4 +- .../metrics-apm.app@template.yaml | 4 +- .../metrics-apm.internal@template.yaml | 4 +- ...-apm.service_destination.10m@template.yaml | 8 +- ...s-apm.service_destination.1m@template.yaml | 8 +- ...-apm.service_destination.60m@template.yaml | 8 +- ...rics-apm.service_summary.10m@template.yaml | 8 +- ...trics-apm.service_summary.1m@template.yaml | 8 +- ...rics-apm.service_summary.60m@template.yaml | 8 +- ...-apm.service_transaction.10m@template.yaml | 8 +- ...s-apm.service_transaction.1m@template.yaml | 8 +- ...-apm.service_transaction.60m@template.yaml | 8 +- .../metrics-apm.transaction.10m@template.yaml | 8 +- .../metrics-apm.transaction.1m@template.yaml | 8 +- .../metrics-apm.transaction.60m@template.yaml | 8 +- .../traces-apm.rum@template.yaml | 6 +- .../traces-apm.sampled@template.yaml | 6 +- .../index-templates/traces-apm@template.yaml | 4 +- .../APMIndexTemplateRegistryTests.java | 44 +++++++++++ .../test/30_custom_templates.yml | 76 +++++++++++++++++++ 22 files changed, 189 insertions(+), 64 deletions(-) create mode 100644 docs/changelog/104182.yaml create mode 100644 x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml diff --git a/docs/changelog/104182.yaml b/docs/changelog/104182.yaml new file mode 100644 index 0000000000000..b5cf10f941cc6 --- /dev/null +++ b/docs/changelog/104182.yaml @@ -0,0 +1,5 @@ +pr: 104182 +summary: "Apm-data: fix `@custom` component templates" +area: Data streams +type: bug +issues: [] diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml index 0ebbb99a1e379..3d9c1490e5a86 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.app@template.yaml @@ -11,12 +11,12 @@ composed_of: - apm@mappings - apm@settings - apm-10d@lifecycle -- apm@custom +- logs@custom - logs-apm.app@custom - ecs@mappings ignore_missing_component_templates: +- logs@custom - logs-apm.app@custom -- apm@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml index 831f7cc404415..4adcf125b2df9 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/logs-apm.error@template.yaml @@ -13,12 +13,12 @@ composed_of: - apm@settings - apm-10d@lifecycle - logs-apm.error@mappings -- apm@custom +- logs@custom - logs-apm.error@custom - ecs@mappings ignore_missing_component_templates: +- logs@custom -
logs-apm.error@custom -- apm@custom template: mappings: properties: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml index bdd1fa363bcf4..c2233469110f8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.app@template.yaml @@ -13,11 +13,11 @@ composed_of: - apm-90d@lifecycle - metrics-apm@mappings - metrics-apm@settings -- apm@custom +- metrics@custom - metrics-apm.app@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- metrics@custom - metrics-apm.app@custom template: settings: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml index 205784e22e685..3d6d05c58e780 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.internal@template.yaml @@ -14,11 +14,11 @@ composed_of: - apm-90d@lifecycle - metrics-apm@mappings - metrics-apm@settings -- apm@custom +- metrics@custom - metrics-apm.internal@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- metrics@custom - metrics-apm.internal@custom template: settings: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml index 6279e044fbfcf..f234b60b1a6ec 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml index 10e4ca5b39a52..aa4f212532e56 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml 
b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml index dbac0d0d17d89..9b1a26486f482 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_destination.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_destination@mappings -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_destination@custom +- metrics@custom +- metrics-apm.service_destination.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml index af99e419d4a56..c37ec93651d9d 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml index 29c28953d6b40..3a99bc8472c66 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml index bdbd4900df3bb..d829967f7eddf 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_summary.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_summary@mappings -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_summary@custom +- metrics@custom +- metrics-apm.service_summary.60m@custom template: settings: index: diff --git 
a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml index 8b4e88391a475..bc21b35d4777f 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.10m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml index 811067f8e6f30..87a1e254baea7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml index db28b7c56aaab..b45ce0ec0fad7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.service_transaction.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.service_transaction@mappings -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.service_transaction@custom +- metrics@custom +- metrics-apm.service_transaction.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml index 548f73656fda4..51d3c90cb4af8 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.10m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.10m@custom - ecs@mappings ignore_missing_component_templates: -- 
apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.10m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml index 6206e7c126c48..8825a93db28dc 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.1m@template.yaml @@ -15,12 +15,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.1m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.1m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml index 4ad00aecf23a5..e6657fbfe5d28 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/metrics-apm.transaction.60m@template.yaml @@ -16,12 +16,12 @@ composed_of: - metrics-apm@mappings - metrics-apm@settings - metrics-apm.transaction@mappings -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.60m@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom -- metrics-apm.transaction@custom +- metrics@custom +- metrics-apm.transaction.60m@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml index 174faf432eb6e..174aec8c5515a 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.rum@template.yaml @@ -14,14 +14,12 @@ composed_of: - apm-90d@lifecycle - traces-apm@mappings - traces-apm.rum@mappings -- apm@custom -- traces-apm@custom +- traces@custom - traces-apm.rum@custom - ecs@mappings ignore_missing_component_templates: +- traces@custom - traces-apm.rum@custom -- traces-apm@custom -- apm@custom template: settings: index: diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml index 8c65c69bc3afa..a39d10897a2ed 100644 --- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm.sampled@template.yaml @@ -11,10 +11,12 @@ composed_of: - traces@mappings - apm@mappings - apm@settings -- apm@custom +- traces@custom +- traces-apm.sampled@custom - ecs@mappings ignore_missing_component_templates: -- apm@custom +- traces@custom +- traces-apm.sampled@custom template: lifecycle: data_retention: 1h diff --git a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml index fb6670a7f7143..de9c47dfd3f1b 
100644
--- a/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml
+++ b/x-pack/plugin/apm-data/src/main/resources/index-templates/traces-apm@template.yaml
@@ -13,12 +13,12 @@ composed_of:
 - apm@settings
 - apm-10d@lifecycle
 - traces-apm@mappings
-- apm@custom
+- traces@custom
 - traces-apm@custom
 - ecs@mappings
 ignore_missing_component_templates:
+- traces@custom
 - traces-apm@custom
-- apm@custom
 template:
   settings:
     index:
diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java
index 7dcd6fdd807e4..c10c3fde45162 100644
--- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java
+++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java
@@ -57,10 +57,12 @@ import java.util.stream.Collectors;
 
 import static org.elasticsearch.xpack.core.XPackSettings.APM_DATA_ENABLED;
+import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.isIn;
 import static org.hamcrest.Matchers.not;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
@@ -249,6 +251,48 @@ public void testIndexTemplates() throws Exception {
         assertThat(actualInstalledIngestPipelines.get(), equalTo(0));
     }
 
+    public void testIndexTemplateConventions() throws Exception {
+        for (Map.Entry<String, ComposableIndexTemplate> entry : apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet()) {
+            final String name = entry.getKey();
+            final int atIndex = name.lastIndexOf('@');
+            assertThat(atIndex, not(equalTo(-1)));
+            assertThat(name.substring(atIndex + 1), equalTo("template"));
+
+            final String dataStreamType = name.substring(0, name.indexOf('-'));
+            assertThat(dataStreamType, isIn(List.of("logs", "metrics", "traces")));
+
+            final ComposableIndexTemplate template = entry.getValue();
+            assertThat(template.indexPatterns().size(), equalTo(1));
+
+            final String namePrefix = name.substring(0, atIndex);
+            switch (namePrefix) {
+                case "logs-apm.app", "metrics-apm.app":
+                    // These two data streams have a service-specific dataset.
+                    assertThat(template.indexPatterns().get(0), equalTo(namePrefix + ".*-*"));
+                    break;
+                default:
+                    assertThat(template.indexPatterns().get(0), equalTo(namePrefix + "-*"));
+                    break;
+            }
+
+            // Each index template should be composed of the following optional component templates:
+            // <data-stream-type>@custom
+            // <data-stream-type>-<data-stream-dataset>@custom
+            final List<String> optionalComponentTemplates = template.composedOf()
+                .stream()
+                .filter(t -> template.getIgnoreMissingComponentTemplates().contains(t))
+                .toList();
+            assertThat(optionalComponentTemplates, containsInAnyOrder(namePrefix + "@custom", dataStreamType + "@custom"));
+
+            // There should be no required custom component templates.
+            final List<String> requiredCustomComponentTemplates = template.getRequiredComponentTemplates()
+                .stream()
+                .filter(t -> t.endsWith("@custom"))
+                .toList();
+            assertThat(requiredCustomComponentTemplates, empty());
+        }
+    }
+
     private Map<String, ComponentTemplate> getIndependentComponentTemplateConfigs() {
         return apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet().stream().filter(template -> {
             Settings settings = template.getValue().template().settings();
diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml
new file mode 100644
index 0000000000000..62b36926d01dc
--- /dev/null
+++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/30_custom_templates.yml
@@ -0,0 +1,76 @@
+---
+setup:
+  - do:
+      cluster.health:
+        wait_for_events: languid
+
+  - do:
+      cluster.put_component_template:
+        name: "metrics@custom"
+        body:
+          template:
+            mappings:
+              properties:
+                custom_field1:
+                  type: keyword
+                  meta:
+                    source: metrics@custom
+                custom_field2:
+                  type: keyword
+                  meta:
+                    source: metrics@custom
+
+  - do:
+      cluster.put_component_template:
+        name: "metrics-apm.app@custom"
+        body:
+          template:
+            mappings:
+              properties:
+                custom_field2:
+                  type: keyword
+                  meta:
+                    source: metrics-apm.app@custom
+                custom_field3:
+                  type: keyword
+                  meta:
+                    source: metrics-apm.app@custom
+
+---
+"Test metrics @custom component templates":
+  - do:
+      indices.create_data_stream:
+        name: metrics-apm.app.svc1-testing
+  - do:
+      # Wait for cluster state changes to be applied before
+      # querying field mappings.
+      cluster.health:
+        wait_for_events: languid
+  - do:
+      indices.get_field_mapping:
+        index: metrics-apm.app.svc1-testing
+        fields: custom_field*
+  - set: {_arbitrary_key_: index}
+  - match:
+      $body.$index.mappings:
+        custom_field1:
+          full_name: custom_field1
+          mapping:
+            custom_field1:
+              type: keyword
+              meta:
+                source: metrics@custom
+        custom_field2:
+          full_name: custom_field2
+          mapping:
+            custom_field2:
+              type: keyword
+              meta:
+                source: metrics-apm.app@custom
+        custom_field3:
+          full_name: custom_field3
+          mapping:
+            custom_field3:
+              type: keyword
+              meta:
+                source: metrics-apm.app@custom

From 876e70159c01ae306251281ae2fdbabca8732ed9 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Tue, 16 Jan 2024 20:25:55 -0800
Subject: [PATCH 61/95] Prepare enrich plan to support multi clusters (#104355)

This is a prerequisite to support enrich in cross-clusters query. The
main change in this pull request is to replace esIndex in logical and
physical Enrich plan with a map from the cluster to concrete enrich
indices so that each cluster can select its own concrete enrich index
for performing lookups.
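Below is a minimal, self-contained sketch of that shape. The names here
(ClusterEnrichIndices, LOCAL_CLUSTER, indexFor) are hypothetical
illustrations, not this patch's API; the patch itself stores a plain
Map<String, String> keyed by cluster alias on the Enrich and EnrichExec
plan nodes and performs the equivalent lookup in
LocalExecutionPlanner#planEnrich.

import java.util.Map;

// Hypothetical illustration of the cluster-alias -> concrete-enrich-index mapping.
final class ClusterEnrichIndices {
    // The empty string is the local-cluster alias, mirroring
    // RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY in the real code.
    static final String LOCAL_CLUSTER = "";

    private final Map<String, String> concreteIndices; // cluster alias -> concrete enrich index

    ClusterEnrichIndices(Map<String, String> concreteIndices) {
        this.concreteIndices = Map.copyOf(concreteIndices);
    }

    // Each cluster selects its own concrete index; a missing entry is a planning error.
    String indexFor(String clusterAlias) {
        String index = concreteIndices.get(clusterAlias);
        if (index == null) {
            throw new IllegalArgumentException("No concrete enrich index for cluster [" + clusterAlias + "]");
        }
        return index;
    }

    public static void main(String[] args) {
        var byCluster = new ClusterEnrichIndices(
            Map.of(LOCAL_CLUSTER, ".enrich-languages-1704067200000", "remote1", ".enrich-languages-1704153600000")
        );
        System.out.println(byCluster.indexFor(LOCAL_CLUSTER)); // the local cluster's lookup index
        System.out.println(byCluster.indexFor("remote1"));     // remote1's own lookup index
    }
}

Keying by alias (with "" for the local cluster) lets the coordinator ship
one plan while every participating cluster substitutes its own .enrich-*
system index at lookup time.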
---
 docs/changelog/104355.yaml                    |   5 +
 .../org/elasticsearch/TransportVersions.java  |   1 +
 .../xpack/esql/EsqlTestUtils.java             |   2 +-
 .../xpack/esql/analysis/Analyzer.java         |  66 ++++------
 .../xpack/esql/analysis/EnrichResolution.java |  69 +++++++++-
 .../esql/enrich/EnrichPolicyResolution.java   |  13 --
 .../esql/enrich/EnrichPolicyResolver.java     | 120 +++++++++++-------
 .../xpack/esql/io/stream/PlanNamedTypes.java  |  97 ++++++++++----
 .../xpack/esql/parser/LogicalPlanBuilder.java |   1 +
 .../xpack/esql/plan/logical/Enrich.java       |  24 ++--
 .../xpack/esql/plan/physical/EnrichExec.java  |  43 +++++--
 .../esql/planner/LocalExecutionPlanner.java   |  12 +-
 .../xpack/esql/planner/Mapper.java            |   8 +-
 .../xpack/esql/plugin/ComputeService.java     |  20 ++-
 .../xpack/esql/session/EsqlSession.java       |  30 +----
 .../elasticsearch/xpack/esql/CsvTests.java    |  17 +--
 .../esql/analysis/AnalyzerTestUtils.java      |  27 ++--
 .../xpack/esql/analysis/AnalyzerTests.java    |   8 +-
 .../LocalPhysicalPlanOptimizerTests.java      |  29 ++---
 .../optimizer/LogicalPlanOptimizerTests.java  |  12 +-
 .../optimizer/PhysicalPlanOptimizerTests.java |  29 ++---
 .../esql/parser/StatementParserTests.java     |   4 +
 .../planner/LocalExecutionPlannerTests.java   |   1 +
 .../esql/stats/PlanExecutorMetricsTests.java  |  16 ++-
 .../esql/tree/EsqlNodeSubclassTests.java      |  17 ---
 .../xpack/ql/tree/NodeSubclassTests.java      |   5 +
 26 files changed, 387 insertions(+), 289 deletions(-)
 create mode 100644 docs/changelog/104355.yaml
 delete mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java

diff --git a/docs/changelog/104355.yaml b/docs/changelog/104355.yaml
new file mode 100644
index 0000000000000..2a100faf3c35f
--- /dev/null
+++ b/docs/changelog/104355.yaml
@@ -0,0 +1,5 @@
+pr: 104355
+summary: Prepare enrich plan to support multi clusters
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 0951f1b42e8b5..45bbd551c0e70 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -185,6 +185,7 @@ static TransportVersion def(int id) {
     public static final TransportVersion ESQL_ENRICH_POLICY_CCQ_MODE = def(8_573_00_0);
     public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_574_00_0);
     public static final TransportVersion PEERFINDER_REPORTS_PEERS_MASTERS = def(8_575_00_0);
+    public static final TransportVersion ESQL_MULTI_CLUSTERS_ENRICH = def(8_576_00_0);
 
     /*
      * STOP! READ THIS FIRST! No, really,
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
index 8edcdd9edb124..9c8d5f420d53b 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
@@ -145,7 +145,7 @@ public static Map<String, EsField> loadMapping(String name) {
     }
 
     public static EnrichResolution emptyPolicyResolution() {
-        return new EnrichResolution(Set.of(), Set.of());
+        return new EnrichResolution();
     }
 
     public static SearchStats statsForMissingField(String...
names) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java index af5a0bd3f0b70..e0a36c8d81e82 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Analyzer.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.plan.logical.Drop; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -43,7 +42,6 @@ import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.index.EsIndex; -import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.plan.TableIdentifier; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; @@ -209,52 +207,35 @@ protected LogicalPlan rule(Enrich plan, AnalyzerContext context) { // the policy does not exist return plan; } - String policyName = (String) plan.policyName().fold(); - EnrichPolicyResolution policyRes = context.enrichResolution() - .resolvedPolicies() - .stream() - .filter(x -> x.policyName().equals(policyName)) - .findFirst() - .orElse(new EnrichPolicyResolution(policyName, null, null)); - - IndexResolution idx = policyRes.index(); - EnrichPolicy policy = policyRes.policy(); - - var policyNameExp = policy == null || idx == null - ? new UnresolvedAttribute( - plan.policyName().source(), - policyName, - null, - unresolvedPolicyError(policyName, context.enrichResolution()) - ) - : plan.policyName(); - - var matchField = policy != null && (plan.matchField() == null || plan.matchField() instanceof EmptyAttribute) - ? new UnresolvedAttribute(plan.source(), policy.getMatchField()) - : plan.matchField(); - - List enrichFields = policy == null || idx == null - ? (plan.enrichFields() == null ? List.of() : plan.enrichFields()) - : calculateEnrichFields( + final String policyName = (String) plan.policyName().fold(); + final EnrichResolution.ResolvedPolicy resolvedPolicy = context.enrichResolution().getResolvedPolicy(policyName); + if (resolvedPolicy != null) { + EnrichPolicy policy = resolvedPolicy.policy(); + var matchField = plan.matchField() == null || plan.matchField() instanceof EmptyAttribute + ? 
new UnresolvedAttribute(plan.source(), policy.getMatchField()) + : plan.matchField(); + List enrichFields = calculateEnrichFields( plan.source(), policyName, - mappingAsAttributes(plan.source(), idx.get().mapping()), + mappingAsAttributes(plan.source(), resolvedPolicy.mapping()), plan.enrichFields(), policy ); - - return new Enrich(plan.source(), plan.child(), plan.mode(), policyNameExp, matchField, policyRes, enrichFields); - } - - private String unresolvedPolicyError(String policyName, EnrichResolution enrichResolution) { - List potentialMatches = StringUtils.findSimilar(policyName, enrichResolution.existingPolicies()); - String msg = "unresolved enrich policy [" + policyName + "]"; - if (CollectionUtils.isEmpty(potentialMatches) == false) { - msg += ", did you mean " - + (potentialMatches.size() == 1 ? "[" + potentialMatches.get(0) + "]" : "any of " + potentialMatches) - + "?"; + return new Enrich( + plan.source(), + plan.child(), + plan.mode(), + plan.policyName(), + matchField, + policy, + resolvedPolicy.concreteIndices(), + enrichFields + ); + } else { + String error = context.enrichResolution().getError(policyName); + var policyNameExp = new UnresolvedAttribute(plan.policyName().source(), policyName, null, error); + return new Enrich(plan.source(), plan.child(), plan.mode(), policyNameExp, plan.matchField(), null, Map.of(), List.of()); } - return msg; } public static List calculateEnrichFields( @@ -589,6 +570,7 @@ private LogicalPlan resolveEnrich(Enrich enrich, List childrenOutput) enrich.policyName(), resolved, enrich.policy(), + enrich.concreteIndices(), enrich.enrichFields() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java index 332e5e60565b6..deb683a94a8f1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/EnrichResolution.java @@ -7,8 +7,73 @@ package org.elasticsearch.xpack.esql.analysis; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.util.CollectionUtils; +import org.elasticsearch.xpack.ql.util.StringUtils; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.Set; -public record EnrichResolution(Set resolvedPolicies, Set existingPolicies) {} +/** + * Holds the resolution results of the enrich polices. + * The results and errors are collected via {@link #addResolvedPolicy} and {@link #addError}. 
+ * And the results can be retrieved via {@link #getResolvedPolicy} and {@link #getError}
+ */
+public final class EnrichResolution {
+
+    private final Map<String, ResolvedPolicy> resolvedPolicies = ConcurrentCollections.newConcurrentMap(); // policy name -> resolved policy
+    private final Map<String, String> errors = ConcurrentCollections.newConcurrentMap(); // policy to error
+    private final Set<String> existingPolicies = ConcurrentCollections.newConcurrentSet(); // for suggestion
+
+    public ResolvedPolicy getResolvedPolicy(String policyName) {
+        return resolvedPolicies.get(policyName);
+    }
+
+    public Collection<EnrichPolicy> resolvedEnrichPolicies() {
+        return resolvedPolicies.values().stream().map(r -> r.policy).toList();
+    }
+
+    public String getError(String policyName) {
+        final String error = errors.get(policyName);
+        if (error != null) {
+            return error;
+        }
+        return notFoundError(policyName);
+    }
+
+    public void addResolvedPolicy(
+        String policyName,
+        EnrichPolicy policy,
+        Map<String, String> concreteIndices,
+        Map<String, EsField> mapping
+    ) {
+        resolvedPolicies.put(policyName, new ResolvedPolicy(policy, concreteIndices, mapping));
+    }
+
+    public void addError(String policyName, String reason) {
+        errors.put(policyName, reason);
+    }
+
+    public void addExistingPolicies(Set<String> policyNames) {
+        existingPolicies.addAll(policyNames);
+    }
+
+    private String notFoundError(String policyName) {
+        List<String> potentialMatches = StringUtils.findSimilar(policyName, existingPolicies);
+        String msg = "unresolved enrich policy [" + policyName + "]";
+        if (CollectionUtils.isEmpty(potentialMatches) == false) {
+            msg += ", did you mean "
+                + (potentialMatches.size() == 1 ? "[" + potentialMatches.get(0) + "]" : "any of " + potentialMatches)
+                + "?";
+        }
+        return msg;
+    }
+
+    public record ResolvedPolicy(EnrichPolicy policy, Map<String, String> concreteIndices, Map<String, EsField> mapping) {
+
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java
deleted file mode 100644
index 5014fe1fcd1df..0000000000000
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolution.java
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
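The class added above is, in effect, a small thread-safe registry. A short
usage sketch, assuming the x-pack ES|QL classpath and made-up policy and
index names:

import java.util.List;
import java.util.Map;
import java.util.Set;

import org.elasticsearch.xpack.core.enrich.EnrichPolicy;
import org.elasticsearch.xpack.esql.analysis.EnrichResolution;

class EnrichResolutionUsageSketch {
    public static void main(String[] args) {
        EnrichResolution resolution = new EnrichResolution();
        resolution.addExistingPolicies(Set.of("languages", "clientip"));

        // A resolved policy carries one concrete enrich index per cluster alias ("" is the local cluster).
        EnrichPolicy languages = new EnrichPolicy(
            EnrichPolicy.MATCH_TYPE, null, List.of("languages_idx"), "language_code", List.of("language_name")
        );
        resolution.addResolvedPolicy("languages", languages, Map.of("", ".enrich-languages-123"), Map.of());

        // A failed resolution records its error instead; the Verifier surfaces it later.
        resolution.addError("clientip", "cannot find enrich index for policy [clientip]");

        System.out.println(resolution.getResolvedPolicy("languages").concreteIndices());
        // Unknown names fall back to a did-you-mean hint built from existingPolicies:
        System.out.println(resolution.getError("language"));
    }
}

Because both maps are concurrent, per-policy resolution can complete in
parallel (as the resolver does with RefCountingListener) without extra
locking.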
- */ - -package org.elasticsearch.xpack.esql.enrich; - -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.ql.index.IndexResolution; - -public record EnrichPolicyResolution(String policyName, EnrichPolicy policy, IndexResolution index) {} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 1e21886a7ac4b..d5783e5ef0100 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -11,12 +11,16 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -25,10 +29,14 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.enrich.EnrichMetadata; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.session.EsqlSession; +import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolver; +import java.util.Collection; +import java.util.List; import java.util.Map; import java.util.Set; @@ -48,36 +56,69 @@ public EnrichPolicyResolver(ClusterService clusterService, TransportService tran transportService.registerRequestHandler( RESOLVE_ACTION_NAME, threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME), - ResolveRequest::new, + LookupRequest::new, new RequestHandler() ); } - public void resolvePolicy(String policyName, ActionListener listener) { + public void resolvePolicy(Collection policyNames, ActionListener listener) { + if (policyNames.isEmpty()) { + listener.onResponse(new EnrichResolution()); + return; + } transportService.sendRequest( clusterService.localNode(), RESOLVE_ACTION_NAME, - new ResolveRequest(policyName), - new ActionListenerResponseHandler<>( - listener.map(r -> r.resolution), - ResolveResponse::new, - threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME) - ) + new LookupRequest(policyNames), + new ActionListenerResponseHandler<>(listener.delegateFailureAndWrap((l, lookup) -> { + final EnrichResolution resolution = new EnrichResolution(); + resolution.addExistingPolicies(lookup.allPolicies); + try (RefCountingListener refs = new RefCountingListener(l.map(unused -> resolution))) { + for (Map.Entry e : lookup.policies.entrySet()) { + resolveOnePolicy(e.getKey(), e.getValue(), resolution, refs.acquire()); + } + } + }), LookupResponse::new, 
threadPool.executor(EsqlPlugin.ESQL_THREAD_POOL_NAME)) ); } + private void resolveOnePolicy(String policyName, EnrichPolicy policy, EnrichResolution resolution, ActionListener listener) { + ThreadContext threadContext = threadPool.getThreadContext(); + listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); + try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { + indexResolver.resolveAsMergedMapping( + EnrichPolicy.getBaseName(policyName), + IndexResolver.ALL_FIELDS, + false, + Map.of(), + listener.map(indexResult -> { + if (indexResult.isValid()) { + EsIndex esIndex = indexResult.get(); + Set indices = esIndex.concreteIndices(); + var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(indices, 0)); + resolution.addResolvedPolicy(policyName, policy, concreteIndices, esIndex.mapping()); + } else { + resolution.addError(policyName, indexResult.toString()); + } + return null; + }), + EsqlSession::specificValidity + ); + } + } + private static UnsupportedOperationException unsupported() { return new UnsupportedOperationException("local node transport action"); } - private static class ResolveRequest extends TransportRequest { - private final String policyName; + private static class LookupRequest extends TransportRequest { + private final Collection policyNames; - ResolveRequest(String policyName) { - this.policyName = policyName; + LookupRequest(Collection policyNames) { + this.policyNames = policyNames; } - ResolveRequest(StreamInput in) { + LookupRequest(StreamInput in) { throw unsupported(); } @@ -87,14 +128,16 @@ public void writeTo(StreamOutput out) { } } - private static class ResolveResponse extends TransportResponse { - private final EnrichPolicyResolution resolution; + private static class LookupResponse extends TransportResponse { + final Map policies; + final Set allPolicies; - ResolveResponse(EnrichPolicyResolution resolution) { - this.resolution = resolution; + LookupResponse(Map policies, Set allPolicies) { + this.policies = policies; + this.allPolicies = allPolicies; } - ResolveResponse(StreamInput in) { + LookupResponse(StreamInput in) { throw unsupported(); } @@ -104,38 +147,19 @@ public void writeTo(StreamOutput out) { } } - private class RequestHandler implements TransportRequestHandler { + private class RequestHandler implements TransportRequestHandler { @Override - public void messageReceived(ResolveRequest request, TransportChannel channel, Task task) throws Exception { - String policyName = request.policyName; - EnrichPolicy policy = policies().get(policyName); - ThreadContext threadContext = threadPool.getThreadContext(); - ActionListener listener = new ChannelActionListener<>(channel); - listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); - try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { - indexResolver.resolveAsMergedMapping( - EnrichPolicy.getBaseName(policyName), - IndexResolver.ALL_FIELDS, - false, - Map.of(), - listener.map(indexResult -> new ResolveResponse(new EnrichPolicyResolution(policyName, policy, indexResult))), - EsqlSession::specificValidity - ); + public void messageReceived(LookupRequest request, TransportChannel channel, Task task) throws Exception { + final EnrichMetadata metadata = clusterService.state().metadata().custom(EnrichMetadata.TYPE); + final Map policies = metadata == null ? 
Map.of() : metadata.getPolicies(); + final Map results = Maps.newMapWithExpectedSize(request.policyNames.size()); + for (String policyName : request.policyNames) { + EnrichPolicy p = policies.get(policyName); + if (p != null) { + results.put(policyName, new EnrichPolicy(p.getType(), null, List.of(), p.getMatchField(), p.getEnrichFields())); + } } + new ChannelActionListener<>(channel).onResponse(new LookupResponse(results, policies.keySet())); } } - - public Set allPolicyNames() { - // TODO: remove this suggestion as it exposes policy names without the right permission - return policies().keySet(); - } - - private Map policies() { - if (clusterService == null || clusterService.state() == null) { - return Map.of(); - } - EnrichMetadata metadata = clusterService.state().metadata().custom(EnrichMetadata.TYPE); - return metadata == null ? Map.of() : metadata.getPolicies(); - } - } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index b6dce816db218..2b73d0bf9f7b1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -12,10 +12,13 @@ import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -167,7 +170,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.ql.index.EsIndex; -import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; import org.elasticsearch.xpack.ql.plan.logical.Filter; @@ -482,15 +484,25 @@ static void writeEvalExec(PlanStreamOutput out, EvalExec evalExec) throws IOExce } static EnrichExec readEnrichExec(PlanStreamInput in) throws IOException { - return new EnrichExec( - in.readSource(), - in.readPhysicalPlanNode(), - in.readNamedExpression(), - in.readString(), - in.readString(), - readEsIndex(in), - readNamedExpressions(in) - ); + final Source source = in.readSource(); + final PhysicalPlan child = in.readPhysicalPlanNode(); + final NamedExpression matchField = in.readNamedExpression(); + final String policyName = in.readString(); + final String policyMatchField = in.readString(); + final Map concreteIndices; + final Enrich.Mode mode; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + mode = in.readEnum(Enrich.Mode.class); + concreteIndices = in.readMap(StreamInput::readString, 
StreamInput::readString); + } else { + mode = Enrich.Mode.ANY; + EsIndex esIndex = readEsIndex(in); + if (esIndex.concreteIndices().size() != 1) { + throw new IllegalStateException("expected a single concrete enrich index; got " + esIndex.concreteIndices()); + } + concreteIndices = Map.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + } + return new EnrichExec(source, child, mode, matchField, policyName, policyMatchField, concreteIndices, readNamedExpressions(in)); } static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOException { @@ -499,7 +511,17 @@ static void writeEnrichExec(PlanStreamOutput out, EnrichExec enrich) throws IOEx out.writeNamedExpression(enrich.matchField()); out.writeString(enrich.policyName()); out.writeString(enrich.policyMatchField()); - writeEsIndex(out, enrich.enrichIndex()); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeEnum(enrich.mode()); + out.writeMap(enrich.concreteIndices(), StreamOutput::writeString, StreamOutput::writeString); + } else { + if (enrich.concreteIndices().keySet().equals(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY))) { + String concreteIndex = enrich.concreteIndices().get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + writeEsIndex(out, new EsIndex(concreteIndex, Map.of(), Set.of(concreteIndex))); + } else { + throw new IllegalStateException("expected a single concrete enrich index; got " + enrich.concreteIndices()); + } + } writeNamedExpressions(out, enrich.enrichFields()); } @@ -725,19 +747,29 @@ static void writeEval(PlanStreamOutput out, Eval eval) throws IOException { } static Enrich readEnrich(PlanStreamInput in) throws IOException { - Enrich.Mode m = Enrich.Mode.ANY; + Enrich.Mode mode = Enrich.Mode.ANY; if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ENRICH_POLICY_CCQ_MODE)) { - m = in.readEnum(Enrich.Mode.class); + mode = in.readEnum(Enrich.Mode.class); } - return new Enrich( - in.readSource(), - in.readLogicalPlanNode(), - m, - in.readExpression(), - in.readNamedExpression(), - new EnrichPolicyResolution(in.readString(), new EnrichPolicy(in), IndexResolution.valid(readEsIndex(in))), - readNamedExpressions(in) - ); + final Source source = in.readSource(); + final LogicalPlan child = in.readLogicalPlanNode(); + final Expression policyName = in.readExpression(); + final NamedExpression matchField = in.readNamedExpression(); + if (in.getTransportVersion().before(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + in.readString(); // discard the old policy name + } + final EnrichPolicy policy = new EnrichPolicy(in); + final Map concreteIndices; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + concreteIndices = in.readMap(StreamInput::readString, StreamInput::readString); + } else { + EsIndex esIndex = readEsIndex(in); + if (esIndex.concreteIndices().size() > 1) { + throw new IllegalStateException("expected a single enrich index; got " + esIndex); + } + concreteIndices = Map.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + } + return new Enrich(source, child, mode, policyName, matchField, policy, concreteIndices, readNamedExpressions(in)); } static void writeEnrich(PlanStreamOutput out, Enrich enrich) throws IOException { @@ -749,9 +781,22 @@ static void writeEnrich(PlanStreamOutput out, Enrich enrich) throws IOException out.writeLogicalPlanNode(enrich.child()); out.writeExpression(enrich.policyName()); 
out.writeNamedExpression(enrich.matchField()); - out.writeString(enrich.policy().policyName()); - enrich.policy().policy().writeTo(out); - writeEsIndex(out, enrich.policy().index().get()); + if (out.getTransportVersion().before(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeString(BytesRefs.toString(enrich.policyName().fold())); // old policy name + } + enrich.policy().writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_MULTI_CLUSTERS_ENRICH)) { + out.writeMap(enrich.concreteIndices(), StreamOutput::writeString, StreamOutput::writeString); + } else { + Map concreteIndices = enrich.concreteIndices(); + if (concreteIndices.keySet().equals(Set.of(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY))) { + String enrichIndex = concreteIndices.get(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + EsIndex esIndex = new EsIndex(enrichIndex, Map.of(), Set.of(enrichIndex)); + writeEsIndex(out, esIndex); + } else { + throw new IllegalStateException("expected a single enrich index; got " + concreteIndices); + } + } writeNamedExpressions(out, enrich.enrichFields()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java index 5e90f6e8e44c9..a6ce2db548504 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/LogicalPlanBuilder.java @@ -330,6 +330,7 @@ public PlanFactory visitEnrichCommand(EsqlBaseParser.EnrichCommandContext ctx) { new Literal(source(ctx.policyName), policyName, DataTypes.KEYWORD), matchField, null, + Map.of(), keepClauses.isEmpty() ? List.of() : keepClauses ); }; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java index 37a0ff0fe5001..d5db90aa07325 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Enrich.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.esql.plan.logical; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.ql.capabilities.Resolvables; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.EmptyAttribute; @@ -29,8 +29,9 @@ public class Enrich extends UnaryPlan { private final Expression policyName; private final NamedExpression matchField; - private final EnrichPolicyResolution policy; - private List enrichFields; + private final EnrichPolicy policy; + private final Map concreteIndices; // cluster -> enrich indices + private final List enrichFields; private List output; private final Mode mode; @@ -61,7 +62,8 @@ public Enrich( Mode mode, Expression policyName, NamedExpression matchField, - EnrichPolicyResolution policy, + EnrichPolicy policy, + Map concreteIndices, List enrichFields ) { super(source, child); @@ -69,6 +71,7 @@ public Enrich( this.policyName = policyName; this.matchField = matchField; this.policy = policy; + this.concreteIndices = concreteIndices; this.enrichFields = enrichFields; } @@ -80,10 +83,14 @@ public List enrichFields() { return enrichFields; } - public EnrichPolicyResolution policy() { + 
public EnrichPolicy policy() { return policy; } + public Map concreteIndices() { + return concreteIndices; + } + public Expression policyName() { return policyName; } @@ -102,12 +109,12 @@ public boolean expressionsResolved() { @Override public UnaryPlan replaceChild(LogicalPlan newChild) { - return new Enrich(source(), newChild, mode, policyName, matchField, policy, enrichFields); + return new Enrich(source(), newChild, mode, policyName, matchField, policy, concreteIndices, enrichFields); } @Override protected NodeInfo info() { - return NodeInfo.create(this, Enrich::new, child(), mode, policyName, matchField, policy, enrichFields); + return NodeInfo.create(this, Enrich::new, child(), mode, policyName, matchField, policy, concreteIndices, enrichFields); } @Override @@ -131,11 +138,12 @@ public boolean equals(Object o) { && Objects.equals(policyName, enrich.policyName) && Objects.equals(matchField, enrich.matchField) && Objects.equals(policy, enrich.policy) + && Objects.equals(concreteIndices, enrich.concreteIndices) && Objects.equals(enrichFields, enrich.enrichFields); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), mode, policyName, matchField, policy, enrichFields); + return Objects.hash(super.hashCode(), mode, policyName, matchField, policy, concreteIndices, enrichFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java index 6f2b83ef0aa6f..0bfaa2db2be5d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EnrichExec.java @@ -6,23 +6,25 @@ */ package org.elasticsearch.xpack.esql.plan.physical; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.NamedExpression; -import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import java.util.List; +import java.util.Map; import java.util.Objects; import static org.elasticsearch.xpack.esql.expression.NamedExpressions.mergeOutputAttributes; public class EnrichExec extends UnaryExec implements EstimatesRowSize { + private final Enrich.Mode mode; private final NamedExpression matchField; private final String policyName; private final String policyMatchField; - private final EsIndex enrichIndex; + private final Map concreteIndices; // cluster -> enrich index private final List enrichFields; /** @@ -32,42 +34,58 @@ public class EnrichExec extends UnaryExec implements EstimatesRowSize { * @param matchField the match field in the source data * @param policyName the enrich policy name * @param policyMatchField the match field name in the policy - * @param enrichIndex the enricy policy index (the system index created by the policy execution, not the source index) + * @param concreteIndices a map from cluster to concrete enrich indices * @param enrichFields the enrich fields */ public EnrichExec( Source source, PhysicalPlan child, + Enrich.Mode mode, NamedExpression matchField, String policyName, String policyMatchField, - EsIndex enrichIndex, + Map concreteIndices, List enrichFields ) { super(source, child); + this.mode = mode; this.matchField = matchField; this.policyName = policyName; this.policyMatchField = policyMatchField; - this.enrichIndex = 
enrichIndex; + this.concreteIndices = concreteIndices; this.enrichFields = enrichFields; } @Override protected NodeInfo info() { - return NodeInfo.create(this, EnrichExec::new, child(), matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return NodeInfo.create( + this, + EnrichExec::new, + child(), + mode, + matchField, + policyName, + policyMatchField, + concreteIndices, + enrichFields + ); } @Override public EnrichExec replaceChild(PhysicalPlan newChild) { - return new EnrichExec(source(), newChild, matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return new EnrichExec(source(), newChild, mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); + } + + public Enrich.Mode mode() { + return mode; } public NamedExpression matchField() { return matchField; } - public EsIndex enrichIndex() { - return enrichIndex; + public Map concreteIndices() { + return concreteIndices; } public List enrichFields() { @@ -99,15 +117,16 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; if (super.equals(o) == false) return false; EnrichExec that = (EnrichExec) o; - return Objects.equals(matchField, that.matchField) + return mode.equals(that.mode) + && Objects.equals(matchField, that.matchField) && Objects.equals(policyName, that.policyName) && Objects.equals(policyMatchField, that.policyMatchField) - && Objects.equals(enrichIndex, that.enrichIndex) + && Objects.equals(concreteIndices, that.concreteIndices) && Objects.equals(enrichFields, that.enrichFields); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), matchField, policyName, policyMatchField, enrichIndex, enrichFields); + return Objects.hash(super.hashCode(), mode, matchField, policyName, policyMatchField, concreteIndices, enrichFields); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index d79becfc8a736..992c922693edd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -9,7 +9,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.compute.Describable; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -93,7 +92,6 @@ import java.util.Map; import java.util.Objects; import java.util.Optional; -import java.util.Set; import java.util.function.Function; import java.util.stream.Stream; @@ -110,6 +108,7 @@ public class LocalExecutionPlanner { private static final Logger logger = LogManager.getLogger(LocalExecutionPlanner.class); private final String sessionId; + private final String clusterAlias; private final CancellableTask parentTask; private final BigArrays bigArrays; private final BlockFactory blockFactory; @@ -122,6 +121,7 @@ public class LocalExecutionPlanner { public LocalExecutionPlanner( String sessionId, + String clusterAlias, CancellableTask parentTask, BigArrays bigArrays, BlockFactory blockFactory, @@ -133,6 +133,7 @@ public LocalExecutionPlanner( PhysicalOperationProviders physicalOperationProviders ) { this.sessionId = sessionId; + this.clusterAlias = clusterAlias; this.parentTask = parentTask; this.bigArrays = 
bigArrays; this.blockFactory = blockFactory; @@ -456,11 +457,10 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon Layout.Builder layoutBuilder = source.layout.builder(); layoutBuilder.append(enrich.enrichFields()); Layout layout = layoutBuilder.build(); - Set indices = enrich.enrichIndex().concreteIndices(); - if (indices.size() != 1) { - throw new EsqlIllegalArgumentException("Resolved enrich should have one concrete index; got " + indices); + String enrichIndex = enrich.concreteIndices().get(clusterAlias); + if (enrichIndex == null) { + throw new EsqlIllegalArgumentException("No concrete enrich index for cluster [" + clusterAlias + "]"); } - String enrichIndex = Iterables.get(indices, 0); return source.with( new EnrichLookupOperator.Factory( sessionId, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java index 3eea84b0bd1f9..9410e9e97d078 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.planner; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -142,10 +143,11 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) { return new EnrichExec( enrich.source(), child, + enrich.mode(), enrich.matchField(), - enrich.policy().policyName(), - enrich.policy().policy().getMatchField(), - enrich.policy().index().get(), + BytesRefs.toString(enrich.policyName().fold()), + enrich.policy().getMatchField(), + enrich.concreteIndices(), enrich.enrichFields() ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index e781ed4a60c35..172fc0a3dc5cc 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -155,7 +155,14 @@ public void execute( .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); if (dataNodePlan == null || clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { - var computeContext = new ComputeContext(sessionId, List.of(), configuration, null, null); + var computeContext = new ComputeContext( + sessionId, + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + List.of(), + configuration, + null, + null + ); runCompute( rootTask, computeContext, @@ -187,7 +194,7 @@ public void execute( // run compute on the coordinator runCompute( rootTask, - new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null), + new ComputeContext(sessionId, RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, List.of(), configuration, exchangeSource, null), coordinatorPlan, cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { responseHeadersCollector.collect(); @@ -378,6 +385,7 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, try { LocalExecutionPlanner planner = new LocalExecutionPlanner( 
context.sessionId, + context.clusterAlias, task, bigArrays, blockFactory, @@ -570,13 +578,14 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T ); final ActionListener listener = new ChannelActionListener<>(channel); final EsqlConfiguration configuration = request.configuration(); + String clusterAlias = request.clusterAlias(); acquireSearchContexts( - request.clusterAlias(), + clusterAlias, request.shardIds(), configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - var computeContext = new ComputeContext(sessionId, searchContexts, configuration, null, exchangeSink); + var computeContext = new ComputeContext(sessionId, clusterAlias, searchContexts, configuration, null, exchangeSink); runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { // don't return until all pages are fetched exchangeSink.addCompletionListener( @@ -669,7 +678,7 @@ void runComputeOnRemoteCluster( ); runCompute( parentTask, - new ComputeContext(localSessionId, List.of(), configuration, exchangeSource, exchangeSink), + new ComputeContext(localSessionId, clusterAlias, List.of(), configuration, exchangeSource, exchangeSink), coordinatorPlan, cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { responseHeadersCollector.collect(); @@ -702,6 +711,7 @@ void runComputeOnRemoteCluster( record ComputeContext( String sessionId, + String clusterAlias, List searchContexts, EsqlConfiguration configuration, ExchangeSourceHandler exchangeSource, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java index 284c78c6e0121..add6a0d24994c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java @@ -9,12 +9,11 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.fieldcaps.FieldCapabilities; -import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; @@ -52,7 +51,6 @@ import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.util.Holder; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -150,32 +148,18 @@ public void analyzedPlan(LogicalPlan parsed, ActionListener listene private void preAnalyze(LogicalPlan parsed, BiFunction action, ActionListener listener) { PreAnalyzer.PreAnalysis preAnalysis = preAnalyzer.preAnalyze(parsed); - Set policyNames = new HashSet<>(preAnalysis.policyNames); - EnrichResolution resolution = new EnrichResolution(ConcurrentCollections.newConcurrentSet(), enrichPolicyResolver.allPolicyNames()); - - ActionListener groupedListener = listener.delegateFailureAndWrap((l, unused) -> { - assert resolution.resolvedPolicies().size() == policyNames.size() - : resolution.resolvedPolicies().size() + " != " + policyNames.size(); - + 
enrichPolicyResolver.resolvePolicy(preAnalysis.policyNames, listener.delegateFailureAndWrap((l, enrichResolution) -> { // first we need the match_fields names from enrich policies and THEN, with an updated list of fields, we call field_caps API - var matchFields = resolution.resolvedPolicies() - .stream() - .filter(p -> p.index().isValid()) // only if the policy by the specified name was found; later the Verifier will be - // triggered - .map(p -> p.policy().getMatchField()) + var matchFields = enrichResolution.resolvedEnrichPolicies() + .stream() // triggered + .map(EnrichPolicy::getMatchField) .collect(Collectors.toSet()); - preAnalyzeIndices( parsed, - l.delegateFailureAndWrap((ll, indexResolution) -> ll.onResponse(action.apply(indexResolution, resolution))), + l.delegateFailureAndWrap((ll, indexResolution) -> ll.onResponse(action.apply(indexResolution, enrichResolution))), matchFields ); - }); - try (RefCountingListener refs = new RefCountingListener(groupedListener)) { - for (String policyName : policyNames) { - enrichPolicyResolver.resolvePolicy(policyName, refs.acquire(resolution.resolvedPolicies()::add)); - } - } + })); } private void preAnalyzeIndices(LogicalPlan parsed, ActionListener listener, Set enrichPolicyMatchFields) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 39a7eee2e616d..0f6dbfb81f141 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; @@ -39,6 +40,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; @@ -48,7 +50,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; @@ -89,7 +90,6 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -263,18 +263,18 @@ private static IndexResolution loadIndexResolution(String mappingName, String in } private static EnrichResolution loadEnrichPolicies() { - Set names = new HashSet<>(); - Set resolutions = new HashSet<>(); + EnrichResolution enrichResolution = new EnrichResolution(); for (CsvTestsDataLoader.EnrichConfig policyConfig : CsvTestsDataLoader.ENRICH_POLICIES) { EnrichPolicy policy = 
loadEnrichPolicyMapping(policyConfig.policyFileName()); CsvTestsDataLoader.TestsDataset sourceIndex = CSV_DATASET_MAP.get(policy.getIndices().get(0)); // this could practically work, but it's wrong: // EnrichPolicyResolution should contain the policy (system) index, not the source index - IndexResolution idxRes = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName()); - names.add(policyConfig.policyName()); - resolutions.add(new EnrichPolicyResolution(policyConfig.policyName(), policy, idxRes)); + EsIndex esIndex = loadIndexResolution(sourceIndex.mappingFileName(), sourceIndex.indexName()).get(); + var concreteIndices = Map.of(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, Iterables.get(esIndex.concreteIndices(), 0)); + enrichResolution.addResolvedPolicy(policyConfig.policyName(), policy, concreteIndices, esIndex.mapping()); + enrichResolution.addExistingPolicies(Set.of(policyConfig.policyName())); } - return new EnrichResolution(resolutions, names); + return enrichResolution; } private static EnrichPolicy loadEnrichPolicyMapping(String policyFileName) { @@ -338,6 +338,7 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { ); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, + "", new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()), bigArrays, blockFactory, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java index e357efe3fcc1f..605bfa7b05bff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTestUtils.java @@ -9,7 +9,6 @@ import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.EsqlTestUtils; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; @@ -19,6 +18,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.Set; import static org.elasticsearch.xpack.esql.EsqlTestUtils.TEST_VERIFIER; @@ -88,28 +88,17 @@ public static IndexResolution analyzerExpandedDefaultMapping() { } public static EnrichResolution defaultEnrichResolution() { - EnrichPolicyResolution policyRes = loadEnrichPolicyResolution( - "languages", - "language_code", - "languages_idx", - "mapping-languages.json" - ); - return new EnrichResolution(Set.of(policyRes), Set.of("languages")); + return loadEnrichPolicyResolution("languages", "language_code", "languages_idx", "mapping-languages.json"); } - public static EnrichPolicyResolution loadEnrichPolicyResolution( - String policyName, - String matchField, - String idxName, - String mappingFile - ) { + public static EnrichResolution loadEnrichPolicyResolution(String policyName, String matchField, String idxName, String mappingFile) { IndexResolution mapping = loadMapping(mappingFile, idxName); List enrichFields = new ArrayList<>(mapping.get().mapping().keySet()); enrichFields.remove(matchField); - return new EnrichPolicyResolution( - policyName, - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(idxName), matchField, enrichFields), - mapping - ); + EnrichPolicy policy = new 
EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(idxName), matchField, enrichFields); + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy(policyName, policy, Map.of("", idxName), mapping.get().mapping()); + enrichResolution.addExistingPolicies(Set.of(policyName)); + return enrichResolution; } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 90e45a0a8b5a7..a01e5bc5f55d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Max; import org.elasticsearch.xpack.esql.parser.ParsingException; @@ -1455,10 +1454,9 @@ public void testEnrichFieldsIncludeMatchField() { IndexResolution testIndex = loadMapping("mapping-basic.json", "test"); IndexResolution languageIndex = loadMapping("mapping-languages.json", "languages"); var enrichPolicy = new EnrichPolicy("match", null, List.of("unused"), "language_code", List.of("language_code", "language_name")); - EnrichResolution enrichResolution = new EnrichResolution( - Set.of(new EnrichPolicyResolution("languages", enrichPolicy, languageIndex)), - Set.of("languages") - ); + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy("languages", enrichPolicy, Map.of("", "languages"), languageIndex.get().mapping()); + enrichResolution.addExistingPolicies(Set.of("languages")); AnalyzerContext context = new AnalyzerContext(configuration(query), new EsqlFunctionRegistry(), testIndex, enrichResolution); Analyzer analyzer = new Analyzer(context, TEST_VERIFIER); LogicalPlan plan = analyze(query, analyzer); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java index 5887d61c652bb..2716c4ff5195e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.Verifier; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.parser.EsqlParser; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; @@ -133,25 +132,17 @@ public void init() { physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(functionRegistry); - var enrichResolution = new EnrichResolution( - Set.of( - new EnrichPolicyResolution( - "foo", - new 
EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), - IndexResolution.valid( - new EsIndex( - "idx", - Map.ofEntries( - Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), - Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) - ) - ) - ) - ) - ), - Set.of("foo") + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy( + "foo", + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), + Map.of("", "idx"), + Map.ofEntries( + Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), + Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) + ) ); - + enrichResolution.addExistingPolicies(Set.of("foo")); analyzer = new Analyzer( new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), new Verifier(new Metrics()) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 6320294d7ee54..2a4cf459a7c32 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils; -import org.elasticsearch.xpack.esql.analysis.EnrichResolution; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -82,7 +80,6 @@ import java.util.List; import java.util.Map; -import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -128,19 +125,14 @@ public static void init() { IndexResolution getIndexResult = IndexResolution.valid(test); logicalOptimizer = new LogicalPlanOptimizer(new LogicalOptimizerContext(EsqlTestUtils.TEST_CFG)); - EnrichPolicyResolution policy = AnalyzerTestUtils.loadEnrichPolicyResolution( + var enrichResolution = AnalyzerTestUtils.loadEnrichPolicyResolution( "languages_idx", "id", "languages_idx", "mapping-languages.json" ); analyzer = new Analyzer( - new AnalyzerContext( - EsqlTestUtils.TEST_CFG, - new EsqlFunctionRegistry(), - getIndexResult, - new EnrichResolution(Set.of(policy), Set.of("languages_idx", "something")) - ), + new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, enrichResolution), TEST_VERIFIER ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index c05e11d8d8a13..6a1bffe22cd7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.esql.analysis.Analyzer; import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; import 
org.elasticsearch.xpack.esql.analysis.EnrichResolution; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; @@ -164,25 +163,17 @@ public void init() { physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(config)); FunctionRegistry functionRegistry = new EsqlFunctionRegistry(); mapper = new Mapper(functionRegistry); - var enrichResolution = new EnrichResolution( - Set.of( - new EnrichPolicyResolution( - "foo", - new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), - IndexResolution.valid( - new EsIndex( - "idx", - Map.ofEntries( - Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), - Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) - ) - ) - ) - ) - ), - Set.of("foo") + EnrichResolution enrichResolution = new EnrichResolution(); + enrichResolution.addResolvedPolicy( + "foo", + new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("idx"), "fld", List.of("a", "b")), + Map.of("", "idx"), + Map.ofEntries( + Map.entry("a", new EsField("a", DataTypes.INTEGER, Map.of(), true)), + Map.entry("b", new EsField("b", DataTypes.LONG, Map.of(), true)) + ) ); - + enrichResolution.addExistingPolicies(Set.of("foo")); analyzer = new Analyzer(new AnalyzerContext(config, functionRegistry, getIndexResult, enrichResolution), TEST_VERIFIER); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index 931c96a8cb8ed..fc23a773effdf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -54,6 +54,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import java.util.Map; import java.util.function.Function; import static org.elasticsearch.xpack.esql.EsqlTestUtils.as; @@ -683,6 +684,7 @@ public void testEnrich() { new Literal(EMPTY, "countries", KEYWORD), new EmptyAttribute(EMPTY), null, + Map.of(), List.of() ), processingCommand("enrich countries") @@ -696,6 +698,7 @@ public void testEnrich() { new Literal(EMPTY, "index-policy", KEYWORD), new UnresolvedAttribute(EMPTY, "field_underscore"), null, + Map.of(), List.of() ), processingCommand("enrich index-policy ON field_underscore") @@ -710,6 +713,7 @@ public void testEnrich() { new Literal(EMPTY, "countries", KEYWORD), new UnresolvedAttribute(EMPTY, "country_code"), null, + Map.of(), List.of() ), processingCommand("enrich [ccq.mode :" + mode.name() + "] countries ON country_code") diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 3ac1453e6ad8f..c1ef69a0bf7ca 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -120,6 +120,7 @@ private Matcher maxPageSizeMatcher(boolean estimatedRowSizeIsHuge, int private 
LocalExecutionPlanner planner() throws IOException { return new LocalExecutionPlanner( "test", + "", null, BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance(), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java index 1947249086568..37009c67e2c94 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/stats/PlanExecutorMetricsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; +import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.VerificationException; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import org.elasticsearch.xpack.esql.execution.PlanExecutor; @@ -54,14 +55,23 @@ public void shutdownThreadPool() throws Exception { terminate(threadPool); } + @SuppressWarnings("unchecked") + EnrichPolicyResolver mockEnrichResolver() { + EnrichPolicyResolver enrichResolver = mock(EnrichPolicyResolver.class); + doAnswer(invocation -> { + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new EnrichResolution()); + return null; + }).when(enrichResolver).resolvePolicy(any(), any()); + return enrichResolver; + } + public void testFailedMetric() { Client client = mock(Client.class); IndexResolver idxResolver = new IndexResolver(client, randomAlphaOfLength(10), EsqlDataTypeRegistry.INSTANCE, Set::of); var planExecutor = new PlanExecutor(idxResolver); String[] indices = new String[] { "test" }; - EnrichPolicyResolver enrichResolver = mock(EnrichPolicyResolver.class); - when(enrichResolver.allPolicyNames()).thenReturn(Set.of()); - + var enrichResolver = mockEnrichResolver(); // simulate a valid field_caps response so we can parse and correctly analyze de query FieldCapabilitiesResponse fieldCapabilitiesResponse = mock(FieldCapabilitiesResponse.class); when(fieldCapabilitiesResponse.getIndices()).thenReturn(indices); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java index c4350c8ec74d7..43dec76c7de24 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/tree/EsqlNodeSubclassTests.java @@ -11,8 +11,6 @@ import org.elasticsearch.compute.data.Page; import org.elasticsearch.dissect.DissectParser; -import org.elasticsearch.xpack.core.enrich.EnrichPolicy; -import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; import org.elasticsearch.xpack.esql.expression.function.scalar.ip.CIDRMatch; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Pow; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; @@ -34,7 +32,6 @@ import org.elasticsearch.xpack.ql.expression.UnresolvedNamedExpression; import org.elasticsearch.xpack.ql.expression.UnresolvedStar; import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction; -import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import 
org.elasticsearch.xpack.ql.tree.Node;
 import org.elasticsearch.xpack.ql.tree.NodeSubclassTests;
@@ -86,20 +83,6 @@ protected Object pluggableMakeArg(Class> toBuildClass, Class enrichFields = randomSubsetOf(List.of("e1", "e2", "e3"));
+            return new EnrichPolicy(randomFrom("match", "range"), null, List.of(), randomFrom("m1", "m2"), enrichFields);
+        }
 
         if (Pipe.class == argClass) {
             /*

From a059472ad2bfe6b04473d1bbe7538af283542013 Mon Sep 17 00:00:00 2001
From: Daniel Mitterdorfer
Date: Wed, 17 Jan 2024 07:49:27 +0100
Subject: [PATCH 62/95] Treat failed shard as no data in GetStatusActionIT
 (#104372)

Among other things, the profiling get status API checks whether a cluster
contains any profiling-related data. It has used the search API for that
internally since #103920. However, shards might not be ready shortly after
index creation, and consequently that search request will fail. With this
commit we check whether a search phase has failed and treat this as if the
cluster contains no profiling-related data.

Closes #104035
Closes #104038
---
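Note (added for this edition, not part of the commit): the change below maps one
specific failure, a SearchPhaseExecutionException from the has-data search, to a
"no data" response instead of surfacing an error, because shards of freshly
created profiling indices may not be searchable yet. A minimal sketch of that
listener pattern follows; the helper name treatSearchFailureAsNoData and the
generic noDataResponse are illustrative, not code from this patch.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.search.SearchPhaseExecutionException;

    final class NoDataOnFailure {
        // Wrap a listener so that a failed search phase (e.g. shards not ready yet)
        // yields a default "no data" response rather than propagating the failure.
        static <T> ActionListener<T> treatSearchFailureAsNoData(ActionListener<T> delegate, T noDataResponse) {
            return ActionListener.wrap(delegate::onResponse, e -> {
                if (e instanceof SearchPhaseExecutionException) {
                    delegate.onResponse(noDataResponse);
                } else {
                    delegate.onFailure(e);
                }
            });
        }
    }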
 .../xpack/profiling/GetStatusActionIT.java        |  3 ++-
 .../xpack/profiling/TransportGetStatusAction.java | 13 ++++++++++++-
 .../rest-api-spec/test/profiling/10_basic.yml     |  6 ++----
 3 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java
index 8dbab6e8c06a5..f3417dbf5d472 100644
--- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java
+++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java
@@ -52,10 +52,11 @@ public void testNoTimeoutIfNotWaiting() throws Exception {
         assertFalse(response.hasData());
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/104035")
     public void testWaitsUntilResourcesAreCreated() throws Exception {
         updateProfilingTemplatesEnabled(true);
         GetStatusAction.Request request = new GetStatusAction.Request();
+        // higher timeout since we have more shards than usual
+        request.timeout(TimeValue.timeValueSeconds(120));
         request.waitForResourcesCreated(true);
 
         GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get();
diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
index 004eae1395dc1..d918a0def7ebb 100644
--- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
+++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java
@@ -10,6 +10,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -177,7 +178,17 @@ private void execute(ClusterState state, ActionListener {
+                    // no data yet
+                    if (e instanceof SearchPhaseExecutionException) {
+                        log.trace("Has data check has failed.", e);
+                        listener.onResponse(
+                            new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data, false)
+                        );
+                    } else {
+                        listener.onFailure(e);
+                    }
+                }));
             } else {
                 listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, false, anyPre891Data, false));
             }
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
index 5adbf782f3236..8bc863e6fca9f 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml
@@ -1,10 +1,8 @@
 ---
 setup:
   - skip:
-      version: all
-      reason: AwaitsFix https://github.com/elastic/elasticsearch/issues/104038
-      # version: " - 8.12.99"
-      # reason: "Universal Profiling test infrastructure is available in 8.12+"
+      version: " - 8.12.99"
+      reason: "Universal Profiling test infrastructure is available in 8.12+"
 
   - do:
       cluster.put_settings:

From 54092b40b752a012aa4b2a461dedb7655d07a015 Mon Sep 17 00:00:00 2001
From: Ievgen Degtiarenko
Date: Wed, 17 Jan 2024 08:05:38 +0100
Subject: [PATCH 63/95] Speedup testTasksWaitForAllTask (#104377)

This test relies on setWaitForCompletion expiring, as the task is always
running. There is no need to wait 10 seconds for it.
---
 .../elasticsearch/action/admin/cluster/node/tasks/TasksIT.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
index 3f0436c685781..884f6dbcd677e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java
@@ -721,7 +721,7 @@ public void testTasksWaitForAllTask() throws Exception {
             .map(PersistentTasksCustomMetadata.PersistentTask::getExecutorNode)
             .collect(Collectors.toSet());
         // Spin up a request to wait for all tasks in the cluster to make sure it doesn't cause an infinite loop
-        ListTasksResponse response = clusterAdmin().prepareListTasks().setWaitForCompletion(true).setTimeout(timeValueSeconds(10)).get();
+        ListTasksResponse response = clusterAdmin().prepareListTasks().setWaitForCompletion(true).setTimeout(timeValueSeconds(1)).get();
 
         // We expect the nodes that are running always-running-tasks to report FailedNodeException and fail to list their tasks
         assertThat(response.getNodeFailures().size(), equalTo(nodesRunningTasks.size()));

From 66e3e1586f94cc8d6c7a96f38fc6c5b35110a5a7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Aur=C3=A9lien=20FOUCRET?=
Date: Wed, 17 Jan 2024 08:10:48 +0100
Subject: [PATCH 64/95] [LTR] Rescore window size improvements. (#104318)

---
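Note (added for this edition, not part of the commit): after this change,
window_size becomes mandatory for the learning_to_rank rescorer, and parsing a
rescorer of that type without one fails with a ParsingException. A hedged usage
sketch follows, assuming the builder and learningToRankService are in scope;
the model id and params are illustrative values only.

    // Constructing the rescorer programmatically now mirrors the parse-time
    // check: without the windowSize(...) call, the equivalent JSON would be
    // rejected with "window_size is required for rescorer of type [learning_to_rank]".
    LearningToRankRescorerBuilder rescorer = new LearningToRankRescorerBuilder(
        "my-ltr-model",               // model_id (assumed to exist)
        Map.of("query_text", "cat"),  // params templated into the model's feature queries
        learningToRankService        // assumed to be available to the caller
    );
    rescorer.windowSize(100);         // required; must cover from + size of the rescored page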
 .../search/rescore/RescorerBuilder.java       |  23 ++-
 .../inference/ltr/LearningToRankRescorer.java |   9 +
 .../ltr/LearningToRankRescorerBuilder.java    |  17 +-
 ...RankRescorerBuilderSerializationTests.java | 155 ++++++++----------
 4 files changed, 105 insertions(+), 99 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java
index 76ee7e09ad870..4c42daba22b7a 100644
--- a/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/rescore/RescorerBuilder.java
@@ -73,6 +73,8 @@ public static RescorerBuilder parseFromXContent(XContentParser parser, Consum
         RescorerBuilder rescorer = null;
         Integer windowSize = null;
         XContentParser.Token token;
+        String rescorerType = null;
+
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 fieldName = parser.currentName();
@@ -83,8 +85,11 @@
                     throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]");
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
-                rescorer = parser.namedObject(RescorerBuilder.class, fieldName, null);
-                rescorerNameConsumer.accept(fieldName);
+                if (fieldName != null) {
+                    rescorer = parser.namedObject(RescorerBuilder.class, fieldName, null);
+                    rescorerNameConsumer.accept(fieldName);
+                    rescorerType = fieldName;
+                }
             } else {
                 throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]");
             }
@@ -92,9 +97,13 @@
         if (rescorer == null) {
             throw new ParsingException(parser.getTokenLocation(), "missing rescore type");
         }
+
         if (windowSize != null) {
             rescorer.windowSize(windowSize.intValue());
+        } else if (rescorer.isWindowSizeRequired()) {
+            throw new ParsingException(parser.getTokenLocation(), "window_size is required for rescorer of type [" + rescorerType + "]");
         }
+
         return rescorer;
     }
@@ -111,11 +120,21 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 
     protected abstract void doXContent(XContentBuilder builder, Params params) throws IOException;
 
+    /**
+     * Indicate if the window_size is a required parameter for the rescorer.
+     */
+    protected boolean isWindowSizeRequired() {
+        return false;
+    }
+
     /**
      * Build the {@linkplain RescoreContext} that will be used to actually
      * execute the rescore against a particular shard.
      */
     public final RescoreContext buildContext(SearchExecutionContext context) throws IOException {
+        if (isWindowSizeRequired()) {
+            assert windowSize != null;
+        }
         int finalWindowSize = windowSize == null ? DEFAULT_WINDOW_SIZE : windowSize;
         RescoreContext rescoreContext = innerBuildContext(finalWindowSize, context);
         return rescoreContext;
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
index 068462bcdfca2..4e3fa3addaf30 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorer.java
@@ -55,6 +55,15 @@ public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext r
         if (ltrRescoreContext.regressionModelDefinition == null) {
             throw new IllegalStateException("local model reference is null, missing rewriteAndFetch before rescore phase?");
         }
+
+        if (rescoreContext.getWindowSize() < topDocs.scoreDocs.length) {
+            throw new IllegalArgumentException(
+                "Rescore window is too small and should be at least the value of from + size but was ["
+                    + rescoreContext.getWindowSize()
+                    + "]"
+            );
+        }
+
         LocalModel definition = ltrRescoreContext.regressionModelDefinition;
 
         // First take top slice of incoming docs, to be rescored:
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
index 11676cc4a1599..a5a7859a7f938 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilder.java
@@ -32,10 +32,10 @@
 public class LearningToRankRescorerBuilder extends RescorerBuilder {
 
-    public static final String NAME = "learning_to_rank";
-    private static final ParseField MODEL_FIELD = new ParseField("model_id");
-    private static final ParseField PARAMS_FIELD = new ParseField("params");
-    private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, Builder::new);
+    public static final ParseField NAME = new ParseField("learning_to_rank");
+    public static final ParseField MODEL_FIELD = new ParseField("model_id");
+    public static final ParseField PARAMS_FIELD = new ParseField("params");
+    private static final ObjectParser PARSER = new ObjectParser<>(NAME.getPreferredName(), false, Builder::new);
 
     static {
         PARSER.declareString(Builder::setModelId, MODEL_FIELD);
@@ -251,7 +251,7 @@ protected LearningToRankRescorerContext innerBuildContext(int windowSize, Search
 
     @Override
     public String getWriteableName() {
-        return NAME;
+        return NAME.getPreferredName();
     }
 
     @Override
@@ -260,6 +260,11 @@ public TransportVersion getMinimalSupportedVersion() {
         return TransportVersion.current();
     }
 
+    @Override
+    protected boolean isWindowSizeRequired() {
+        return true;
+    }
+
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         assert localModel == null || rescoreOccurred : "Unnecessarily populated local model object";
@@ -270,7 +275,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
 
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(NAME);
+        builder.startObject(NAME.getPreferredName());
         builder.field(MODEL_FIELD.getPreferredName(), modelId);
         if (this.params != null) {
             builder.field(PARAMS_FIELD.getPreferredName(), this.params);
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
index 79044a465442b..f52d05fc3220d 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankRescorerBuilderSerializationTests.java
@@ -9,14 +9,19 @@
 
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.search.SearchModule;
+import org.elasticsearch.search.rescore.RescorerBuilder;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xcontent.ToXContent;
+import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.xcontent.XContentType;
+import org.elasticsearch.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase;
 import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider;
 import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfig;
@@ -25,48 +30,36 @@
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.search.rank.RankBuilder.WINDOW_SIZE_FIELD;
 import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.LearningToRankConfigTests.randomLearningToRankConfig;
+import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 public class LearningToRankRescorerBuilderSerializationTests extends AbstractBWCSerializationTestCase {
 
     private static LearningToRankService learningToRankService = mock(LearningToRankService.class);
 
-    @Override
-    protected LearningToRankRescorerBuilder doParseInstance(XContentParser parser) throws IOException {
-        String fieldName = null;
-        LearningToRankRescorerBuilder rescorer = null;
-        Integer windowSize = null;
-        XContentParser.Token token = parser.nextToken();
-        assert token == XContentParser.Token.START_OBJECT;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                fieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if (WINDOW_SIZE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
-                    windowSize = parser.intValue();
-                } else {
-                    throw new ParsingException(parser.getTokenLocation(), "rescore doesn't support [" + fieldName + "]");
+    public void testRequiredWindowSize() throws IOException {
+        for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
+            LearningToRankRescorerBuilder testInstance = createTestInstance();
+            try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+                builder.startObject();
+                testInstance.doXContent(builder, ToXContent.EMPTY_PARAMS);
+                builder.endObject();
+
+                try (XContentParser parser = JsonXContent.jsonXContent.createParser(parserConfig(), Strings.toString(builder))) {
+                    ParsingException e = expectThrows(ParsingException.class, () -> RescorerBuilder.parseFromXContent(parser, (r) -> {}));
+                    assertThat(e.getMessage(), equalTo("window_size is required for rescorer of type [learning_to_rank]"));
                 }
-            } else if (token == XContentParser.Token.START_OBJECT) {
-                rescorer = LearningToRankRescorerBuilder.fromXContent(parser, learningToRankService);
-            } else {
-                throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "] after [" + fieldName + "]");
             }
         }
-        if (rescorer == null) {
-            throw new ParsingException(parser.getTokenLocation(), "missing rescore type");
-        }
-        if (windowSize != null) {
-            rescorer.windowSize(windowSize);
-        }
-        return rescorer;
+    }
+
+    @Override
+    protected LearningToRankRescorerBuilder doParseInstance(XContentParser parser) throws IOException {
+        return (LearningToRankRescorerBuilder) RescorerBuilder.parseFromXContent(parser, (r) -> {});
     }
 
     @Override
@@ -85,76 +78,49 @@ protected LearningToRankRescorerBuilder createTestInstance() {
             learningToRankService
         );
 
-        if (randomBoolean()) {
-            builder.windowSize(randomIntBetween(1, 10000));
-        }
+        builder.windowSize(randomIntBetween(1, 10000));
         return builder;
     }
 
     @Override
     protected LearningToRankRescorerBuilder createXContextTestInstance(XContentType xContentType) {
-        return new LearningToRankRescorerBuilder(randomAlphaOfLength(10), randomBoolean() ? randomParams() : null, learningToRankService);
+        return new LearningToRankRescorerBuilder(randomAlphaOfLength(10), randomBoolean() ? randomParams() : null, learningToRankService)
+            .windowSize(randomIntBetween(1, 10000));
    }
 
     @Override
     protected LearningToRankRescorerBuilder mutateInstance(LearningToRankRescorerBuilder instance) throws IOException {
-
         int i = randomInt(4);
         return switch (i) {
-            case 0 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    randomValueOtherThan(instance.modelId(), () -> randomAlphaOfLength(10)),
-                    instance.params(),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
-            }
+            case 0 -> new LearningToRankRescorerBuilder(
+                randomValueOtherThan(instance.modelId(), () -> randomAlphaOfLength(10)),
+                instance.params(),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             case 1 -> new LearningToRankRescorerBuilder(instance.modelId(), instance.params(), learningToRankService).windowSize(
                 randomValueOtherThan(instance.windowSize(), () -> randomIntBetween(1, 10000))
             );
-            case 2 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    instance.modelId(),
-                    randomValueOtherThan(instance.params(), () -> (randomBoolean() ? randomParams() : null)),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize() + 1);
-                }
-                yield builder;
-            }
+            case 2 -> new LearningToRankRescorerBuilder(
+                instance.modelId(),
+                randomValueOtherThan(instance.params(), () -> (randomBoolean() ? randomParams() : null)),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             case 3 -> {
                 LearningToRankConfig learningToRankConfig = randomValueOtherThan(
                     instance.learningToRankConfig(),
                     () -> randomLearningToRankConfig()
                 );
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    instance.modelId(),
-                    learningToRankConfig,
-                    null,
-                    learningToRankService
+                yield new LearningToRankRescorerBuilder(instance.modelId(), learningToRankConfig, null, learningToRankService).windowSize(
+                    instance.windowSize()
                 );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
-            }
-            case 4 -> {
-                LearningToRankRescorerBuilder builder = new LearningToRankRescorerBuilder(
-                    mock(LocalModel.class),
-                    instance.learningToRankConfig(),
-                    instance.params(),
-                    learningToRankService
-                );
-                if (instance.windowSize() != null) {
-                    builder.windowSize(instance.windowSize());
-                }
-                yield builder;
             }
+            case 4 -> new LearningToRankRescorerBuilder(
+                mock(LocalModel.class),
+                instance.learningToRankConfig(),
+                instance.params(),
+                learningToRankService
+            ).windowSize(instance.windowSize());
             default -> throw new AssertionError("Unexpected random test case");
         };
     }
@@ -169,31 +135,38 @@ protected NamedXContentRegistry xContentRegistry() {
         List namedXContent = new ArrayList<>();
         namedXContent.addAll(new MlInferenceNamedXContentProvider().getNamedXContentParsers());
         namedXContent.addAll(new MlLTRNamedXContentProvider().getNamedXContentParsers());
-        namedXContent.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedXContents());
+        namedXContent.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedXContents());
+        namedXContent.add(
+            new NamedXContentRegistry.Entry(
+                RescorerBuilder.class,
+                LearningToRankRescorerBuilder.NAME,
+                (p, c) -> LearningToRankRescorerBuilder.fromXContent(p, learningToRankService)
+            )
+        );
         return new NamedXContentRegistry(namedXContent);
     }
 
+    @Override
+    protected NamedWriteableRegistry getNamedWriteableRegistry() {
+        return writableRegistry();
+    }
+
     @Override
     protected NamedWriteableRegistry writableRegistry() {
         List namedWriteables = new ArrayList<>(new MlInferenceNamedXContentProvider().getNamedWriteables());
         namedWriteables.addAll(new MlLTRNamedXContentProvider().getNamedWriteables());
-        namedWriteables.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables());
+        namedWriteables.addAll(new SearchModule(Settings.EMPTY, List.of()).getNamedWriteables());
+        namedWriteables.add(
+            new NamedWriteableRegistry.Entry(
+                RescorerBuilder.class,
+                LearningToRankRescorerBuilder.NAME.getPreferredName(),
+                in -> new LearningToRankRescorerBuilder(in, learningToRankService)
+            )
+        );
         return new NamedWriteableRegistry(namedWriteables);
     }
 
-    @Override
-    protected NamedWriteableRegistry getNamedWriteableRegistry() {
-        return writableRegistry();
-    }
-
     private static Map randomParams() {
         return randomMap(1, randomIntBetween(1, 10), () -> new Tuple<>(randomIdentifier(), randomIdentifier()));
     }
-
-    private static LocalModel localModelMock() {
-        LocalModel model = mock(LocalModel.class);
-        String modelId = randomIdentifier();
-        when(model.getModelId()).thenReturn(modelId);
-        return model;
-    }
 }

From d998271bd672daccc540a2e3d7e13bd276f3d3a9 Mon Sep 17 00:00:00 2001
From: Andrew Wilkins
Date: Wed, 17 Jan 2024 16:06:53 +0800
Subject: [PATCH 65/95] x-pack/plugin/apm-data: add dynamic setting for
 enabling template registry (#104386)

Add `xpack.apm_data.registry.enabled` cluster setting, which can be
used to disable the index template registry while restoring a snapshot.

Closes https://github.com/elastic/elasticsearch/issues/104385
---
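Note (added for this edition, not part of the commit): because
xpack.apm_data.registry.enabled is dynamic, it can be flipped at runtime, for
example right before restoring a snapshot. An illustrative sketch using the
cluster update-settings API; "client" is assumed to be an internal Client
handle such as the one tests obtain from client().

    // Disable the APM index template registry, then re-enable it the same way
    // with `true` once the restore has completed.
    client.admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().put("xpack.apm_data.registry.enabled", false))
        .get();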
"enabled" : "disabled"); + this.enabled = enabled; + } + public boolean isEnabled() { return enabled; } diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java index 7acf3a3c972da..f905c17c04b4c 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMPlugin.java @@ -10,36 +10,62 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.XPackSettings; import java.util.Collection; +import java.util.Collections; import java.util.List; public class APMPlugin extends Plugin implements ActionPlugin { private static final Logger logger = LogManager.getLogger(APMPlugin.class); - private final SetOnce registry = new SetOnce<>(); + final SetOnce registry = new SetOnce<>(); + + private final boolean enabled; + + // APM_DATA_REGISTRY_ENABLED controls enabling the index template registry. + // + // This setting will be ignored if the plugin is disabled. + static final Setting APM_DATA_REGISTRY_ENABLED = Setting.boolSetting( + "xpack.apm_data.registry.enabled", + true, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + public APMPlugin(Settings settings) { + this.enabled = XPackSettings.APM_DATA_ENABLED.get(settings); + } @Override public Collection createComponents(PluginServices services) { + logger.info("APM ingest plugin is {}", enabled ? "enabled" : "disabled"); + Settings settings = services.environment().settings(); + ClusterService clusterService = services.clusterService(); registry.set( - new APMIndexTemplateRegistry( - services.environment().settings(), - services.clusterService(), - services.threadPool(), - services.client(), - services.xContentRegistry() - ) + new APMIndexTemplateRegistry(settings, clusterService, services.threadPool(), services.client(), services.xContentRegistry()) ); - APMIndexTemplateRegistry registryInstance = registry.get(); - logger.info("APM ingest plugin is {}", registryInstance.isEnabled() ? 
"enabled" : "disabled"); - registryInstance.initialize(); - return List.of(registryInstance); + if (enabled) { + APMIndexTemplateRegistry registryInstance = registry.get(); + registryInstance.setEnabled(APM_DATA_REGISTRY_ENABLED.get(settings)); + clusterService.getClusterSettings().addSettingsUpdateConsumer(APM_DATA_REGISTRY_ENABLED, registryInstance::setEnabled); + registryInstance.initialize(); + } + return Collections.emptyList(); } @Override public void close() { registry.get().close(); } + + @Override + public List> getSettings() { + return List.of(APM_DATA_REGISTRY_ENABLED); + } } diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index c10c3fde45162..4f6a5b58ff38d 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.features.FeatureService; import org.elasticsearch.ingest.IngestMetadata; @@ -55,11 +56,12 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import java.util.stream.Stream; -import static org.elasticsearch.xpack.core.XPackSettings.APM_DATA_ENABLED; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; @@ -77,20 +79,28 @@ public class APMIndexTemplateRegistryTests extends ESTestCase { @Before public void createRegistryAndClient() { + final ClusterSettings clusterSettings = new ClusterSettings( + Settings.EMPTY, + Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Set.of(APMPlugin.APM_DATA_REGISTRY_ENABLED).stream()) + .collect(Collectors.toSet()) + ); + threadPool = new TestThreadPool(this.getClass().getName()); client = new VerifyingClient(threadPool); - clusterService = ClusterServiceUtils.createClusterService(threadPool); + clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings); FeatureService featureService = new FeatureService(List.of()); stackTemplateRegistryAccessor = new StackTemplateRegistryAccessor( new StackTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY, featureService) ); + apmIndexTemplateRegistry = new APMIndexTemplateRegistry( - Settings.builder().put(APM_DATA_ENABLED.getKey(), true).build(), + Settings.EMPTY, clusterService, threadPool, client, NamedXContentRegistry.EMPTY ); + apmIndexTemplateRegistry.setEnabled(true); } @After @@ -113,6 +123,28 @@ public void testThatMissingMasterNodeDoesNothing() { apmIndexTemplateRegistry.clusterChanged(event); } + public void testThatDisablingRegistryDoesNothing() throws Exception { + DiscoveryNode node = DiscoveryNodeUtils.create("node"); + DiscoveryNodes nodes = 
DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); + + apmIndexTemplateRegistry.setEnabled(false); + assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), hasSize(0)); + assertThat(apmIndexTemplateRegistry.getIngestPipelines(), hasSize(0)); + + client.setVerifier((a, r, l) -> { + fail("if the registry is disabled nothing should happen"); + return null; + }); + ClusterChangedEvent event = createClusterChangedEvent(Map.of(), Map.of(), nodes); + apmIndexTemplateRegistry.clusterChanged(event); + + apmIndexTemplateRegistry.setEnabled(true); + assertThat(apmIndexTemplateRegistry.getComponentTemplateConfigs().entrySet(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getComposableTemplateConfigs().entrySet(), not(hasSize(0))); + assertThat(apmIndexTemplateRegistry.getIngestPipelines(), not(hasSize(0))); + } + public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Exception { DiscoveryNode node = DiscoveryNodeUtils.create("node"); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java new file mode 100644 index 0000000000000..289852737393e --- /dev/null +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMPluginTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.apmdata;
+
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ClusterServiceUtils;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.junit.After;
+import org.junit.Before;
+
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class APMPluginTests extends ESTestCase {
+    private APMPlugin apmPlugin;
+    private ClusterService clusterService;
+    private ThreadPool threadPool;
+
+    @Before
+    public void createPlugin() {
+        final ClusterSettings clusterSettings = new ClusterSettings(
+            Settings.EMPTY,
+            Stream.concat(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.stream(), Set.of(APMPlugin.APM_DATA_REGISTRY_ENABLED).stream())
+                .collect(Collectors.toSet())
+        );
+        threadPool = new TestThreadPool(this.getClass().getName());
+        clusterService = ClusterServiceUtils.createClusterService(threadPool, clusterSettings);
+        apmPlugin = new APMPlugin(Settings.builder().put(XPackSettings.APM_DATA_ENABLED.getKey(), true).build());
+    }
+
+    private void createComponents() {
+        Environment mockEnvironment = mock(Environment.class);
+        when(mockEnvironment.settings()).thenReturn(Settings.builder().build());
+        Plugin.PluginServices services = mock(Plugin.PluginServices.class);
+        when(services.clusterService()).thenReturn(clusterService);
+        when(services.threadPool()).thenReturn(threadPool);
+        when(services.environment()).thenReturn(mockEnvironment);
+        apmPlugin.createComponents(services);
+    }
+
+    @After
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+        apmPlugin.close();
+        threadPool.shutdownNow();
+    }
+
+    public void testRegistryEnabledSetting() throws Exception {
+        createComponents();
+
+        // By default, the registry is enabled.
+        assertTrue(apmPlugin.registry.get().isEnabled());
+
+        // The registry can be disabled/enabled dynamically.
+        clusterService.getClusterSettings()
+            .applySettings(Settings.builder().put(APMPlugin.APM_DATA_REGISTRY_ENABLED.getKey(), false).build());
+        assertFalse(apmPlugin.registry.get().isEnabled());
+    }
+
+    public void testDisablingPluginDisablesRegistry() throws Exception {
+        apmPlugin = new APMPlugin(Settings.builder().put(XPackSettings.APM_DATA_ENABLED.getKey(), false).build());
+        createComponents();
+
+        // The plugin is disabled, so the registry is disabled too.
+        assertFalse(apmPlugin.registry.get().isEnabled());
+
+        // The registry can not be enabled dynamically when the plugin is disabled.
+        clusterService.getClusterSettings()
+            .applySettings(Settings.builder().put(APMPlugin.APM_DATA_REGISTRY_ENABLED.getKey(), true).build());
+        assertFalse(apmPlugin.registry.get().isEnabled());
+    }
+}

From d998271bd672daccc540a2e3d7e13bd276f3d3a9 Mon Sep 17 00:00:00 2001
From: Yang Wang
Date: Wed, 17 Jan 2024 20:22:26 +1100
Subject: [PATCH 66/95] [CI] Exclude retries from Azure fixture server side
 request tracking (#104444)

We do not yet include retries in Azure client-side request tracking
(#104443). Hence we exclude them from the server-side tracking in tests
as well.

Resolves: #104362
---
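Note (added for this edition, not part of the commit): the fixture change keys
retry detection on Azure's client request id header. The idiom, extracted as a
sketch for clarity; isRetry is an illustrative name, not from the patch.

    // A concurrent set remembers every X-ms-client-request-id seen so far;
    // Set#add returns false for an id that is already present, i.e. a retry.
    private final Set<String> seenRequestIds = ConcurrentCollections.newConcurrentSet();

    private boolean isRetry(com.sun.net.httpserver.Headers headers) {
        return seenRequestIds.add(headers.getFirst("X-ms-client-request-id")) == false;
    }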
 .../azure/AzureBlobStoreRepositoryTests.java | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java
index f5c1912d15251..e916b02e62b8e 100644
--- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java
+++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase;
@@ -41,6 +42,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.function.Predicate;
 import java.util.regex.Pattern;
 
@@ -196,12 +198,21 @@ private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHa
         private static final Predicate LIST_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+\\?.+").asMatchPredicate();
         private static final Predicate GET_BLOB_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+/.+").asMatchPredicate();
 
+        private final Set seenRequestIds = ConcurrentCollections.newConcurrentSet();
+
         private AzureHTTPStatsCollectorHandler(HttpHandler delegate) {
             super(delegate);
         }
 
         @Override
         protected void maybeTrack(String request, Headers headers) {
+            // Same request id is a retry
+            // https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-ncnbi/817da997-30d2-4cd3-972f-a0073e4e98f7
+            // Do not count retries since the client side request stats do not track them yet.
+ // See https://github.com/elastic/elasticsearch/issues/104443 + if (false == seenRequestIds.add(headers.getFirst("X-ms-client-request-id"))) { + return; + } if (GET_BLOB_PATTERN.test(request)) { trackRequest("GetBlob"); } else if (Regex.simpleMatch("HEAD /*/*/*", request)) { From c21389326549a384ba427d0b7c5b3824eadd22ea Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 17 Jan 2024 09:39:23 +0000 Subject: [PATCH 67/95] Rename transport version constant used for 8.9 releases (#104402) --- .../java/org/elasticsearch/ElasticsearchException.java | 4 ++-- .../main/java/org/elasticsearch/TransportVersions.java | 2 +- .../action/admin/cluster/node/stats/NodeStats.java | 4 ++-- .../action/admin/indices/stats/ShardStats.java | 4 ++-- .../template/get/GetComponentTemplateAction.java | 8 ++++---- .../template/get/GetComposableIndexTemplateAction.java | 8 ++++---- .../template/post/SimulateIndexTemplateRequest.java | 4 ++-- .../template/post/SimulateIndexTemplateResponse.java | 4 ++-- .../indices/template/post/SimulateTemplateAction.java | 4 ++-- .../action/datastreams/GetDataStreamAction.java | 10 ++++------ .../action/search/OpenPointInTimeRequest.java | 4 ++-- .../action/search/TransportSearchAction.java | 2 +- .../unpromotable/BroadcastUnpromotableRequest.java | 2 +- .../org/elasticsearch/cluster/metadata/DataStream.java | 4 ++-- .../cluster/metadata/DataStreamLifecycle.java | 4 ++-- .../cluster/metadata/SingleNodeShutdownMetadata.java | 4 ++-- .../org/elasticsearch/cluster/metadata/Template.java | 4 ++-- .../org/elasticsearch/cluster/node/DiscoveryNodes.java | 4 ++-- .../elasticsearch/repositories/RepositoriesStats.java | 4 ++-- .../MedianAbsoluteDeviationAggregationBuilder.java | 4 ++-- .../search/aggregations/metrics/PercentilesConfig.java | 4 ++-- .../search/aggregations/metrics/TDigestState.java | 4 ++-- .../search/builder/SearchSourceBuilder.java | 4 ++-- .../search/internal/ShardSearchRequest.java | 6 ++---- .../action/search/OpenPointInTimeRequestTests.java | 2 +- .../action/search/SearchRequestTests.java | 2 +- .../search/aggregations/metrics/TDigestStateTests.java | 2 +- .../search/internal/ShardSearchRequestTests.java | 2 +- .../analytics/boxplot/BoxplotAggregationBuilder.java | 4 ++-- .../analytics/rate/InternalResetTrackingRate.java | 4 ++-- .../storage/ReactiveStorageDeciderService.java | 2 +- .../src/main/java/org/elasticsearch/xpack/ccr/Ccr.java | 2 +- .../DataStreamLifecycleFeatureSetUsage.java | 6 +++--- .../xpack/core/ml/action/FlushJobAction.java | 4 ++-- .../ml/action/PutTrainedModelVocabularyAction.java | 4 ++-- .../autodetect/output/FlushAcknowledgement.java | 4 ++-- .../xpack/core/search/action/AsyncStatusResponse.java | 4 ++-- .../xpack/core/security/action/apikey/ApiKey.java | 2 +- .../xpack/core/security/authz/RoleDescriptor.java | 2 +- .../core/ml/action/FlushJobActionRequestTests.java | 2 +- .../xpack/eql/action/EqlSearchResponseTests.java | 4 ++-- .../xpack/ml/inference/nlp/Vocabulary.java | 4 ++-- .../xpack/ml/utils/TransportVersionUtilsTests.java | 4 ++-- .../xpack/security/authc/TokenServiceTests.java | 2 +- .../spatial/search/aggregations/InternalGeoLine.java | 4 ++-- 45 files changed, 84 insertions(+), 88 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 50a5f7420847b..237f50befe4bd 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ 
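The exclusion works because the Azure SDK reuses the `X-ms-client-request-id` header when it retries a request, so the first occurrence of an id is counted and later occurrences are ignored. A minimal, self-contained sketch of that de-duplication pattern (hypothetical names, not the fixture's actual code):

```java
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;

// Hypothetical stand-in for a stats-collecting handler: counts each logical
// request once, treating any repeated client request id as a retry.
class RetryAwareCounter {
    private final Set<String> seenRequestIds = ConcurrentHashMap.newKeySet();
    private final Consumer<String> track;

    RetryAwareCounter(Consumer<String> track) {
        this.track = track;
    }

    void onRequest(String clientRequestId, String operation) {
        // Set.add() returns false when the id was already present, i.e. a retry.
        if (clientRequestId == null || seenRequestIds.add(clientRequestId)) {
            track.accept(operation);
        }
    }
}
```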
From c21389326549a384ba427d0b7c5b3824eadd22ea Mon Sep 17 00:00:00 2001
From: Simon Cooper
Date: Wed, 17 Jan 2024 09:39:23 +0000
Subject: [PATCH 67/95] Rename transport version constant used for 8.9 releases (#104402)

---
 .../java/org/elasticsearch/ElasticsearchException.java       | 4 ++--
 .../main/java/org/elasticsearch/TransportVersions.java       | 2 +-
 .../action/admin/cluster/node/stats/NodeStats.java           | 4 ++--
 .../action/admin/indices/stats/ShardStats.java               | 4 ++--
 .../template/get/GetComponentTemplateAction.java             | 8 ++++----
 .../template/get/GetComposableIndexTemplateAction.java       | 8 ++++----
 .../template/post/SimulateIndexTemplateRequest.java          | 4 ++--
 .../template/post/SimulateIndexTemplateResponse.java         | 4 ++--
 .../indices/template/post/SimulateTemplateAction.java        | 4 ++--
 .../action/datastreams/GetDataStreamAction.java              | 10 ++++------
 .../action/search/OpenPointInTimeRequest.java                | 4 ++--
 .../action/search/TransportSearchAction.java                 | 2 +-
 .../unpromotable/BroadcastUnpromotableRequest.java           | 2 +-
 .../org/elasticsearch/cluster/metadata/DataStream.java       | 4 ++--
 .../cluster/metadata/DataStreamLifecycle.java                | 4 ++--
 .../cluster/metadata/SingleNodeShutdownMetadata.java         | 4 ++--
 .../org/elasticsearch/cluster/metadata/Template.java         | 4 ++--
 .../org/elasticsearch/cluster/node/DiscoveryNodes.java       | 4 ++--
 .../elasticsearch/repositories/RepositoriesStats.java        | 4 ++--
 .../MedianAbsoluteDeviationAggregationBuilder.java           | 4 ++--
 .../search/aggregations/metrics/PercentilesConfig.java       | 4 ++--
 .../search/aggregations/metrics/TDigestState.java            | 4 ++--
 .../search/builder/SearchSourceBuilder.java                  | 4 ++--
 .../search/internal/ShardSearchRequest.java                  | 6 ++----
 .../action/search/OpenPointInTimeRequestTests.java           | 2 +-
 .../action/search/SearchRequestTests.java                    | 2 +-
 .../search/aggregations/metrics/TDigestStateTests.java       | 2 +-
 .../search/internal/ShardSearchRequestTests.java             | 2 +-
 .../analytics/boxplot/BoxplotAggregationBuilder.java         | 4 ++--
 .../analytics/rate/InternalResetTrackingRate.java            | 4 ++--
 .../storage/ReactiveStorageDeciderService.java               | 2 +-
 .../src/main/java/org/elasticsearch/xpack/ccr/Ccr.java       | 2 +-
 .../DataStreamLifecycleFeatureSetUsage.java                  | 6 +++---
 .../xpack/core/ml/action/FlushJobAction.java                 | 4 ++--
 .../ml/action/PutTrainedModelVocabularyAction.java           | 4 ++--
 .../autodetect/output/FlushAcknowledgement.java              | 4 ++--
 .../xpack/core/search/action/AsyncStatusResponse.java        | 4 ++--
 .../xpack/core/security/action/apikey/ApiKey.java            | 2 +-
 .../xpack/core/security/authz/RoleDescriptor.java            | 2 +-
 .../core/ml/action/FlushJobActionRequestTests.java           | 2 +-
 .../xpack/eql/action/EqlSearchResponseTests.java             | 4 ++--
 .../xpack/ml/inference/nlp/Vocabulary.java                   | 4 ++--
 .../xpack/ml/utils/TransportVersionUtilsTests.java           | 4 ++--
 .../xpack/security/authc/TokenServiceTests.java              | 2 +-
 .../spatial/search/aggregations/InternalGeoLine.java         | 4 ++--
 45 files changed, 84 insertions(+), 88 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index 50a5f7420847b..237f50befe4bd 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -1838,13 +1838,13 @@ private enum ElasticsearchExceptionHandle {
             org.elasticsearch.http.HttpHeadersValidationException.class,
             org.elasticsearch.http.HttpHeadersValidationException::new,
             169,
-            TransportVersions.V_8_500_020
+            TransportVersions.V_8_9_X
         ),
         ROLE_RESTRICTION_EXCEPTION(
             ElasticsearchRoleRestrictionException.class,
             ElasticsearchRoleRestrictionException::new,
             170,
-            TransportVersions.V_8_500_020
+            TransportVersions.V_8_9_X
         ),
         API_NOT_AVAILABLE_EXCEPTION(ApiNotAvailableException.class, ApiNotAvailableException::new, 171, TransportVersions.V_8_500_065),
         RECOVERY_COMMIT_TOO_NEW_EXCEPTION(
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index 45bbd551c0e70..c2cd67c88edbe 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -92,7 +92,7 @@ static TransportVersion def(int id) {
      * READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS
      * Detached transport versions added below here.
      */
-    public static final TransportVersion V_8_500_020 = def(8_500_020);
+    public static final TransportVersion V_8_9_X = def(8_500_020);
     public static final TransportVersion V_8_500_061 = def(8_500_061);
     public static final TransportVersion V_8_500_062 = def(8_500_062);
     public static final TransportVersion V_8_500_063 = def(8_500_063);
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
index b8d1a431f92e8..cdb9191bd8d70 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java
@@ -117,7 +117,7 @@ public NodeStats(StreamInput in) throws IOException {
         ingestStats = in.readOptionalWriteable(IngestStats::read);
         adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new);
         indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new);
         } else {
             repositoriesStats = null;
@@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalWriteable(ingestStats);
         out.writeOptionalWriteable(adaptiveSelectionStats);
         out.writeOptionalWriteable(indexingPressureStats);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(repositoriesStats);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
index f90dc894f1b57..477a0bd910719 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java
@@ -61,7 +61,7 @@ public ShardStats(StreamInput in) throws IOException {
         isCustomDataPath = in.readBoolean();
         seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
         retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             isSearchIdle = in.readBoolean();
             searchIdleTime = in.readVLong();
         } else {
@@ -215,7 +215,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(isCustomDataPath);
         out.writeOptionalWriteable(seqNoStats);
         out.writeOptionalWriteable(retentionLeaseStats);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(isSearchIdle);
             out.writeVLong(searchIdleTime);
         }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
index ae73904a8447b..1e0a36cfc1a99 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java
@@ -57,7 +57,7 @@ public Request(String name) {
         public Request(StreamInput in) throws IOException {
             super(in);
             name = in.readOptionalString();
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 includeDefaults = in.readBoolean();
             } else {
                 includeDefaults = false;
@@ -68,7 +68,7 @@ public Request(StreamInput in) throws IOException {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeOptionalString(name);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeBoolean(includeDefaults);
             }
         }
@@ -121,7 +121,7 @@ public static class Response extends ActionResponse implements ToXContentObject
         public Response(StreamInput in) throws IOException {
             super(in);
             componentTemplates = in.readMap(ComponentTemplate::new);
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new);
             } else {
                 rolloverConfiguration = null;
@@ -149,7 +149,7 @@ public RolloverConfiguration getRolloverConfiguration() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeMap(componentTemplates, StreamOutput::writeWriteable);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeOptionalWriteable(rolloverConfiguration);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
index f2c041c2c71bc..8401a510a1482 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java
@@ -58,7 +58,7 @@ public Request(@Nullable String name) {
         public Request(StreamInput in) throws IOException {
             super(in);
             name = in.readOptionalString();
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 includeDefaults = in.readBoolean();
             } else {
                 includeDefaults = false;
@@ -69,7 +69,7 @@ public Request(StreamInput in) throws IOException {
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeOptionalString(name);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeBoolean(includeDefaults);
             }
         }
@@ -123,7 +123,7 @@ public static class Response extends ActionResponse implements ToXContentObject
         public Response(StreamInput in) throws IOException {
             super(in);
             indexTemplates = in.readMap(ComposableIndexTemplate::new);
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new);
             } else {
                 rolloverConfiguration = null;
@@ -147,7 +147,7 @@ public Map<String, ComposableIndexTemplate> indexTemplates() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeMap(indexTemplates, StreamOutput::writeWriteable);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeOptionalWriteable(rolloverConfiguration);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
index 6b71be3925478..9281c6d3dd0bc 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java
@@ -40,7 +40,7 @@ public SimulateIndexTemplateRequest(StreamInput in) throws IOException {
         super(in);
         indexName = in.readString();
         indexTemplateRequest = in.readOptionalWriteable(TransportPutComposableIndexTemplateAction.Request::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
            includeDefaults = in.readBoolean();
         }
     }
@@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeString(indexName);
         out.writeOptionalWriteable(indexTemplateRequest);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(includeDefaults);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
index b7cc8564be062..106f1a7e4f393 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java
@@ -73,7 +73,7 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException {
         } else {
             this.overlappingTemplates = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new);
         }
     }
@@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException {
         } else {
             out.writeBoolean(false);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(rolloverConfiguration);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
index 7f637527a6a1f..a1148695ba6d6 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java
@@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException {
         super(in);
         templateName = in.readOptionalString();
         indexTemplateRequest = in.readOptionalWriteable(TransportPutComposableIndexTemplateAction.Request::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
            includeDefaults = in.readBoolean();
         }
     }
@@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalString(templateName);
         out.writeOptionalWriteable(indexTemplateRequest);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(includeDefaults);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
index 073ac021f787a..f591cc22d19a8 100644
--- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
+++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java
@@ -75,7 +75,7 @@ public Request(StreamInput in) throws IOException {
         super(in);
         this.names = in.readOptionalStringArray();
         this.indicesOptions = IndicesOptions.readIndicesOptions(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             this.includeDefaults = in.readBoolean();
         } else {
             this.includeDefaults = false;
@@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalStringArray(names);
         indicesOptions.writeIndicesOptions(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(includeDefaults);
         }
     }
@@ -481,9 +481,7 @@ public Response(List<DataStreamInfo> dataStreams, @Nullable RolloverConfiguratio
         public Response(StreamInput in) throws IOException {
             this(
                 in.readCollectionAsList(DataStreamInfo::new),
-                in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)
-                    ? in.readOptionalWriteable(RolloverConfiguration::new)
-                    : null
+                in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(RolloverConfiguration::new) : null
             );
         }
@@ -499,7 +497,7 @@ public RolloverConfiguration getRolloverConfiguration() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeCollection(dataStreams);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeOptionalWriteable(rolloverConfiguration);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
index 39813a883c428..874437311d086 100644
--- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java
@@ -54,7 +54,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException {
         this.keepAlive = in.readTimeValue();
         this.routing = in.readOptionalString();
         this.preference = in.readOptionalString();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             this.maxConcurrentShardRequests = in.readVInt();
         }
         if (in.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) {
@@ -70,7 +70,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeTimeValue(keepAlive);
         out.writeOptionalString(routing);
         out.writeOptionalString(preference);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeVInt(maxConcurrentShardRequests);
         }
         if (out.getTransportVersion().onOrAfter(TransportVersions.PIT_WITH_INDEX_FILTER)) {
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 9c80e55a6f49d..e42ac1f4794ff 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -716,7 +716,7 @@ Map createFinalResponse() {
             final String[] indices = entry.getValue().indices();
             final Executor responseExecutor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION);
             // TODO: support point-in-time
-            if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 SearchShardsRequest searchShardsRequest = new SearchShardsRequest(
                     indices,
                     indicesOptions,
diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java
index bf8376cfc5481..312a9843c9e2b 100644
--- a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java
@@ -46,7 +46,7 @@ public BroadcastUnpromotableRequest(StreamInput in) throws IOException {
         indexShardRoutingTable = null;
         shardId = new ShardId(in);
         indices = new String[] { shardId.getIndex().getName() };
-        failShardOnError = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && in.readBoolean();
+        failShardOnError = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) && in.readBoolean();
     }

     public BroadcastUnpromotableRequest(IndexShardRoutingTable indexShardRoutingTable) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
index 84db5887b5926..ff31c6fe950d7 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java
@@ -907,7 +907,7 @@ public DataStream(StreamInput in) throws IOException {
             in.readBoolean(),
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false,
             in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null,
-            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null,
+            in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null,
             in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? in.readBoolean() : false,
             in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION) ? readIndices(in) : List.of(),
             in.getTransportVersion().onOrAfter(TransportVersions.LAZY_ROLLOVER_ADDED) ? in.readBoolean() : false
@@ -944,7 +944,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             out.writeOptionalEnum(indexMode);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(lifecycle);
         }
         if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
index 83a5d99c8f348..a08b4e878ad9b 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java
@@ -187,7 +187,7 @@ public int hashCode() {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(dataRetention);
         }
         if (out.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) {
@@ -197,7 +197,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }

     public DataStreamLifecycle(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             dataRetention = in.readOptionalWriteable(Retention::read);
         } else {
             dataRetention = null;
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
index aaf256a49a0a5..3453b3b6d70ff 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java
@@ -35,8 +35,8 @@ public class SingleNodeShutdownMetadata implements SimpleDiffable<SingleNodeShutdownMetadata>, ToXContentObject {

     public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersions.V_7_16_0;
-    public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersions.V_8_500_020;
-    public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersions.V_8_500_020;
+    public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersions.V_8_9_X;
+    public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersions.V_8_9_X;

     public static final ParseField NODE_ID_FIELD = new ParseField("node_id");
     public static final ParseField TYPE_FIELD = new ParseField("type");
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
index d36b70b49c6ab..18a99f984707f 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java
@@ -123,7 +123,7 @@ public Template(StreamInput in) throws IOException {
         }
         if (in.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) {
             this.lifecycle = in.readOptionalWriteable(DataStreamLifecycle::new);
-        } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             boolean isExplicitNull = in.readBoolean();
             if (isExplicitNull) {
                 this.lifecycle = DataStreamLifecycle.newBuilder().enabled(false).build();
@@ -177,7 +177,7 @@ public void writeTo(StreamOutput out) throws IOException {
         }
         if (out.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) {
             out.writeOptionalWriteable(lifecycle);
-        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             boolean isExplicitNull = lifecycle != null && lifecycle.isEnabled() == false;
             out.writeBoolean(isExplicitNull);
             if (isExplicitNull == false) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
index cd2c927d87f69..918056fea9ec6 100644
--- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
+++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
@@ -667,7 +667,7 @@ public String shortSummary() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(masterNodeId);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeVLong(nodeLeftGeneration);
         } // else nodeLeftGeneration is zero, or we're sending this to a remote cluster which does not care about the nodeLeftGeneration
         out.writeCollection(nodes.values());
@@ -682,7 +682,7 @@ public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) t
             builder.localNodeId(localNode.getId());
         }

-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             builder.nodeLeftGeneration(in.readVLong());
         } // else nodeLeftGeneration is zero, or we're receiving this from a remote cluster so the nodeLeftGeneration does not matter to us
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java
index 722779a646824..b9cce9e3ec500 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java
@@ -28,7 +28,7 @@ public class RepositoriesStats implements Writeable, ToXContentFragment {
     private final Map<String, ThrottlingStats> repositoryThrottlingStats;

     public RepositoriesStats(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             repositoryThrottlingStats = in.readMap(ThrottlingStats::new);
         } else {
             repositoryThrottlingStats = new HashMap<>();
@@ -41,7 +41,7 @@ public RepositoriesStats(Map<String, ThrottlingStats> repositoryThrottlingStats)
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeMap(repositoryThrottlingStats, StreamOutput::writeWriteable);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
index 8f5d3c1b9f322..c3816bef6f0aa 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java
@@ -64,7 +64,7 @@ public MedianAbsoluteDeviationAggregationBuilder(String name) {
     public MedianAbsoluteDeviationAggregationBuilder(StreamInput in) throws IOException {
         super(in);
         compression = in.readDouble();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom);
         } else {
             executionHint = TDigestExecutionHint.HIGH_ACCURACY;
@@ -120,7 +120,7 @@ protected ValuesSourceType defaultValueSourceType() {
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDouble(compression);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(executionHint);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java
index d946ce3e14ea1..fedae36be0263 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java
@@ -130,7 +130,7 @@ public TDigest(double compression, TDigestExecutionHint executionHint) {
         TDigest(StreamInput in) throws IOException {
             this(
                 in.readDouble(),
-                in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)
+                in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
                     ? in.readOptionalWriteable(TDigestExecutionHint::readFrom)
                     : TDigestExecutionHint.HIGH_ACCURACY
             );
@@ -235,7 +235,7 @@ public InternalNumericMetricsAggregation.MultiValue createEmptyPercentileRanksAg
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeDouble(compression);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeOptionalWriteable(executionHint);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
index 23c26794f6bb5..0d0ed21556a92 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java
@@ -107,7 +107,7 @@ public final double compression() {
     public static void write(TDigestState state, StreamOutput out) throws IOException {
         out.writeDouble(state.compression);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeString(state.type.toString());
             out.writeVLong(state.tdigest.size());
         }
@@ -123,7 +123,7 @@ public static TDigestState read(StreamInput in) throws IOException {
         double compression = in.readDouble();
         TDigestState state;
         long size = 0;
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             state = new TDigestState(Type.valueOf(in.readString()), compression);
             size = in.readVLong();
         } else {
diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
index 0211e43933ec3..bc4b2a85bab68 100644
--- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java
@@ -219,7 +219,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException {
         indexBoosts = in.readCollectionAsList(IndexBoost::new);
         minScore = in.readOptionalFloat();
         postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             subSearchSourceBuilders = in.readCollectionAsList(SubSearchSourceBuilder::new);
         } else {
             QueryBuilder queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class);
@@ -289,7 +289,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeCollection(indexBoosts);
         out.writeOptionalFloat(minScore);
         out.writeOptionalNamedWriteable(postQueryBuilder);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeCollection(subSearchSourceBuilders);
         } else if (out.getTransportVersion().before(TransportVersions.V_8_4_0) && subSearchSourceBuilders.size() >= 2) {
             throw new IllegalArgumentException("cannot serialize [sub_searches] to version [" + out.getTransportVersion() + "]");
diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
index 18ae708d8fec3..2023ee2e8d4b6 100644
--- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
@@ -284,8 +284,7 @@ public ShardSearchRequest(StreamInput in) throws IOException {
         numberOfShards = in.readVInt();
         scroll = in.readOptionalWriteable(Scroll::new);
         source = in.readOptionalWriteable(SearchSourceBuilder::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)
-            && in.getTransportVersion().before(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) && in.getTransportVersion().before(TransportVersions.V_8_9_X)) {
             // to deserialize between the 8.8 and 8.500.020 version we need to translate
             // the rank queries into sub searches if we are ranking; if there are no rank queries
             // we deserialize the empty list and do nothing
@@ -360,8 +359,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce
         }
         out.writeOptionalWriteable(scroll);
         out.writeOptionalWriteable(source);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)
-            && out.getTransportVersion().before(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) && out.getTransportVersion().before(TransportVersions.V_8_9_X)) {
             // to serialize between the 8.8 and 8.500.020 version we need to translate
             // the sub searches into rank queries if we are ranking, otherwise, we
             // ignore this because linear combination will have multiple sub searches in
diff --git a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
index 7e1e7de03e288..91bf1059225d8 100644
--- a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
@@ -96,7 +96,7 @@ protected OpenPointInTimeRequest mutateInstance(OpenPointInTimeRequest in) throw
     }

     public void testUseDefaultConcurrentForOldVersion() throws Exception {
-        TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_500_020);
+        TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_9_X);
         try (BytesStreamOutput output = new BytesStreamOutput()) {
             TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_0_0, previousVersion);
             output.setTransportVersion(version);
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
index 8c0ffeabf0ea6..6d66a1fcd3847 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
@@ -154,7 +154,7 @@ public void testRandomVersionSerialization() throws IOException {
             // Versions before 8.8 don't support rank
             searchRequest.source().rankBuilder(null);
         }
-        if (version.before(TransportVersions.V_8_500_020) && searchRequest.source() != null) {
+        if (version.before(TransportVersions.V_8_9_X) && searchRequest.source() != null) {
             // Versions before 8_500_999 don't support queries
             searchRequest.source().subSearches(new ArrayList<>());
         }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java
index f242e19012a35..0fe660e56822c 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java
@@ -194,7 +194,7 @@ public void testSerialization() throws IOException {
             backwardsCompatible.add(i);
         }

-        TDigestState serialized = writeToAndReadFrom(state, TransportVersions.V_8_500_020);
+        TDigestState serialized = writeToAndReadFrom(state, TransportVersions.V_8_9_X);
         assertEquals(serialized, state);

         TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersions.V_8_8_1);
diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java
index ed92bdb1e5919..b16e8f68c7e32 100644
--- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java
@@ -237,7 +237,7 @@ public void testChannelVersion() throws Exception {
             version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_8_0, TransportVersion.current());
         }
         if (request.source() != null && request.source().subSearches().size() >= 2) {
-            version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_500_020, TransportVersion.current());
+            version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_9_X, TransportVersion.current());
         }
         request = copyWriteable(request, namedWriteableRegistry, ShardSearchRequest::new, version);
         channelVersion = TransportVersion.min(channelVersion, version);
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java
index b0e8b8ae05b51..61917220f10d1 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java
@@ -83,7 +83,7 @@ protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBu
     public BoxplotAggregationBuilder(StreamInput in) throws IOException {
         super(in);
         compression = in.readDouble();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom);
         } else {
             executionHint = TDigestExecutionHint.HIGH_ACCURACY;
@@ -98,7 +98,7 @@ public Set<String> metricNames() {
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDouble(compression);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeOptionalWriteable(executionHint);
         }
     }
diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java
index f3af195bc6fa1..dc4b096f3a08e 100644
--- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java
+++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java
@@ -63,7 +63,7 @@ public InternalResetTrackingRate(StreamInput in) throws IOException {
         this.startTime = in.readLong();
         this.endTime = in.readLong();
         this.resetCompensation = in.readDouble();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             this.rateUnit = Rounding.DateTimeUnit.resolve(in.readByte());
         } else {
             this.rateUnit = Rounding.DateTimeUnit.SECOND_OF_MINUTE;
@@ -82,7 +82,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeLong(startTime);
         out.writeLong(endTime);
         out.writeDouble(resetCompensation);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && rateUnit != null) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X) && rateUnit != null) {
             out.writeByte(rateUnit.getId());
         } else {
             out.writeByte(Rounding.DateTimeUnit.SECOND_OF_MINUTE.getId());
diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java
index 7eb3cca18efd0..6bb3bd5fe14f6 100644
--- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java
+++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java
@@ -980,7 +980,7 @@ public static class ReactiveReason implements AutoscalingDeciderResult.Reason {
         static final int MAX_AMOUNT_OF_SHARDS = 512;
         private static final TransportVersion SHARD_IDS_OUTPUT_VERSION = TransportVersions.V_8_4_0;
-        private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersions.V_8_500_020;
+        private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersions.V_8_9_X;

         private final String reason;
         private final long unassigned;
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java
index 7e0e2d1493417..4a3a92aa80bc8 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java
@@ -137,7 +137,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
     public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name";
     public static final String REQUESTED_OPS_MISSING_METADATA_KEY = "es.requested_operations_missing";
-    public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersions.V_8_500_020;
+    public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersions.V_8_9_X;

     private final boolean enabled;
     private final Settings settings;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
index edac3498ca4e4..91cce4126d3a3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
@@ -48,7 +48,7 @@ public void writeTo(StreamOutput out) throws IOException {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersions.V_8_500_020;
+        return TransportVersions.V_8_9_X;
     }

     @Override
@@ -112,7 +112,7 @@ public LifecycleStats(
         }

         public static LifecycleStats read(StreamInput in) throws IOException {
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 return new LifecycleStats(in.readVLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readBoolean());
             } else {
                 return INITIAL;
@@ -121,7 +121,7 @@ public static LifecycleStats read(StreamInput in) throws IOException {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeVLong(dataStreamsWithLifecyclesCount);
                 out.writeVLong(minRetentionMillis);
                 out.writeVLong(maxRetentionMillis);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
index 12fba46e40689..c316e130ecb81 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/FlushJobAction.java
@@ -79,7 +79,7 @@ public Request(StreamInput in) throws IOException {
             advanceTime = in.readOptionalString();
             skipTime = in.readOptionalString();
             waitForNormalization = in.readBoolean();
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 refreshRequired = in.readBoolean();
             }
         }
@@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeOptionalString(advanceTime);
             out.writeOptionalString(skipTime);
             out.writeBoolean(waitForNormalization);
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeBoolean(refreshRequired);
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java
index c153cbc2c039b..995b4446a19a3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java
@@ -86,7 +86,7 @@ public Request(StreamInput in) throws IOException {
             } else {
                 this.merges = List.of();
             }
-            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 this.scores = in.readCollectionAsList(StreamInput::readDouble);
             } else {
                 this.scores = List.of();
@@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException {
             if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
                 out.writeStringCollection(merges);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
                 out.writeCollection(scores, StreamOutput::writeDouble);
             }
             if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java
index 883c94093a2c5..2254959242eab 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java
@@ -66,7 +66,7 @@ public FlushAcknowledgement(String id, Instant lastFinalizedBucketEnd, Boolean r
     public FlushAcknowledgement(StreamInput in) throws IOException {
         id = in.readString();
         lastFinalizedBucketEnd = in.readOptionalInstant();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             refreshRequired = in.readBoolean();
         } else {
             refreshRequired = true;
@@ -77,7 +77,7 @@ public FlushAcknowledgement(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(id);
         out.writeOptionalInstant(lastFinalizedBucketEnd);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(refreshRequired);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
index 7596fe75b4173..43968e90485b3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
@@ -134,7 +134,7 @@ public AsyncStatusResponse(StreamInput in) throws IOException {
         this.skippedShards = in.readVInt();
         this.failedShards = in.readVInt();
         this.completionStatus = (this.isRunning == false) ? RestStatus.readFrom(in) : null;
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
            this.clusters = in.readOptionalWriteable(SearchResponse.Clusters::new);
         } else {
             this.clusters = null;
@@ -160,7 +160,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (isRunning == false) {
             RestStatus.writeTo(out, completionStatus);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             // optional since only CCS uses it; it is null for local-only searches
             out.writeOptionalWriteable(clusters);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
index e57570ce7385b..5753fa3b4ad7a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
@@ -46,7 +46,7 @@
  */
 public final class ApiKey implements ToXContentObject, Writeable {

-    public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersions.V_8_500_020;
+    public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersions.V_8_9_X;

     public enum Type {
         /**
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
index f39eca877432c..2857cbfd1bdd2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
@@ -52,7 +52,7 @@
  */
 public class RoleDescriptor implements ToXContentObject, Writeable {

-    public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersions.V_8_500_020;
+    public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersions.V_8_9_X;

     public static final String ROLE_TYPE = "role";
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java
index a369219bd7c3c..6d85e90dc3108 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java
@@ -53,7 +53,7 @@ protected Writeable.Reader<Request> instanceReader() {
     @Override
     protected Request mutateInstanceForVersion(Request instance, TransportVersion version) {
-        if (version.before(TransportVersions.V_8_500_020)) {
+        if (version.before(TransportVersions.V_8_9_X)) {
             instance.setRefreshRequired(true);
         }
         return instance;
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
index edbeb3d0a0d8c..c616588fe09bb 100644
--- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
+++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java
@@ -299,10 +299,10 @@ private List<Event> mutateEvents(List<Event> original, TransportVersion version)
     public void testEmptyIndexAsMissingEvent() throws IOException {
         Event event = new Event("", "", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), null);
         BytesStreamOutput out = new BytesStreamOutput();
-        out.setTransportVersion(TransportVersions.V_8_500_020);// 8.9.1
+        out.setTransportVersion(TransportVersions.V_8_9_X);// 8.9.1
         event.writeTo(out);
         ByteArrayStreamInput in = new ByteArrayStreamInput(out.bytes().array());
-        in.setTransportVersion(TransportVersions.V_8_500_020);
+        in.setTransportVersion(TransportVersions.V_8_9_X);
         Event event2 = Event.readFrom(in);
         assertTrue(event2.missing());
     }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
index 11b699df66b83..0bfc64c9b0027 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java
@@ -69,7 +69,7 @@ public Vocabulary(StreamInput in) throws IOException {
         } else {
             merges = List.of();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             scores = in.readCollectionAsList(StreamInput::readDouble);
         } else {
             scores = List.of();
@@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             out.writeStringCollection(merges);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeCollection(scores, StreamOutput::writeDouble);
         }
     }
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
index 939ccde7df6c4..015614e56c02b 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java
@@ -27,7 +27,7 @@ public class TransportVersionUtilsTests extends ESTestCase {
         "Bertram",
         new CompatibilityVersions(TransportVersions.V_7_0_1, Map.of()),
         "Charles",
-        new CompatibilityVersions(TransportVersions.V_8_500_020, Map.of()),
+        new CompatibilityVersions(TransportVersions.V_8_9_X, Map.of()),
         "Dominic",
         new CompatibilityVersions(TransportVersions.V_8_0_0, Map.of())
     );
@@ -79,6 +79,6 @@ public void testIsMinTransformVersionSameAsCurrent() {
     public void testIsMinTransportVersionOnOrAfter() {
         assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_7_0_0), equalTo(true));
-        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_8_500_020), equalTo(false));
+        assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_8_9_X), equalTo(false));
     }
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
index 2f646631d14cd..ff3acbc122501 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
@@ -254,7 +254,7 @@ private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterSer
             transportVersion = TransportVersions.V_8_8_1;
         } else {
             version = Version.V_8_9_0;
-            transportVersion = TransportVersions.V_8_500_020;
+            transportVersion = TransportVersions.V_8_9_X;
         }
         return addAnotherDataNodeWithVersion(clusterService, version, transportVersion);
     }
diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java
index 0de11109e33e7..d940f366ef942 100644
--- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java
+++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java
@@ -88,7 +88,7 @@ public InternalGeoLine(StreamInput in) throws IOException {
         this.includeSorts = in.readBoolean();
         this.sortOrder = SortOrder.readFromStream(in);
         this.size = in.readVInt();
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             nonOverlapping = in.readBoolean();
             simplified = in.readBoolean();
         } else {
@@ -105,7 +105,7 @@ protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeBoolean(includeSorts);
         sortOrder.writeTo(out);
         out.writeVInt(size);
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeBoolean(nonOverlapping);
             out.writeBoolean(simplified);
         }
See https://docs.aws.amazon.com/sdkref/latest/guide/feature-ec2-instance-metadata.html ``` Resolves #104244 --- docs/changelog/104407.yaml | 6 ++++++ .../elasticsearch/discovery/ec2/AwsEc2Utils.java | 14 ++++++++++++-- .../discovery/ec2/Ec2DiscoveryPluginTests.java | 13 +++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/104407.yaml diff --git a/docs/changelog/104407.yaml b/docs/changelog/104407.yaml new file mode 100644 index 0000000000000..1ce6b6f97f580 --- /dev/null +++ b/docs/changelog/104407.yaml @@ -0,0 +1,6 @@ +pr: 104407 +summary: Set read timeout for fetching IMDSv2 token +area: Discovery-Plugins +type: enhancement +issues: + - 104244 diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java index 256a5516a2ef2..b2475216a9ce7 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2Utils.java @@ -8,6 +8,9 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.SDKGlobalConfiguration; +import com.amazonaws.util.StringUtils; + import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; @@ -24,7 +27,11 @@ class AwsEc2Utils { private static final Logger logger = LogManager.getLogger(AwsEc2Utils.class); - private static final int CONNECT_TIMEOUT = 2000; + // The timeout can be configured via the AWS_METADATA_SERVICE_TIMEOUT environment variable + private static final int TIMEOUT = Optional.ofNullable(System.getenv(SDKGlobalConfiguration.AWS_METADATA_SERVICE_TIMEOUT_ENV_VAR)) + .filter(StringUtils::hasValue) + .map(s -> Integer.parseInt(s) * 1000) + .orElse(2000); private static final int METADATA_TOKEN_TTL_SECONDS = 10; static final String X_AWS_EC_2_METADATA_TOKEN = "X-aws-ec2-metadata-token"; @@ -39,7 +46,10 @@ static Optional getMetadataToken(String metadataTokenUrl) { try { urlConnection = (HttpURLConnection) new URL(metadataTokenUrl).openConnection(); urlConnection.setRequestMethod("PUT"); - urlConnection.setConnectTimeout(CONNECT_TIMEOUT); + // Use both timeout for connect and read timeout analogous to AWS SDK. 
+ // See com.amazonaws.internal.HttpURLConnection#connectToEndpoint + urlConnection.setConnectTimeout(TIMEOUT); + urlConnection.setReadTimeout(TIMEOUT); urlConnection.setRequestProperty("X-aws-ec2-metadata-token-ttl-seconds", String.valueOf(METADATA_TOKEN_TTL_SECONDS)); } catch (IOException e) { logger.warn("Unable to access the IMDSv2 URI: " + metadataTokenUrl, e); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java index b9bea564e2720..41b848954b551 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginTests.java @@ -121,6 +121,19 @@ public void testTokenMetadataApiIsMisbehaving() throws Exception { } } + public void testTokenMetadataApiDoesNotRespond() throws Exception { + try (var metadataServer = new MetadataServer("/metadata", exchange -> { + assertNull(exchange.getRequestHeaders().getFirst("X-aws-ec2-metadata-token")); + exchange.sendResponseHeaders(200, 0); + exchange.getResponseBody().write("us-east-1c".getBytes(StandardCharsets.UTF_8)); + exchange.close(); + }, "/latest/api/token", ex -> { + // Intentionally don't close the connection, so the client has to time out + })) { + assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); + } + } + public void testTokenMetadataApiIsNotAvailable() throws Exception { try (var metadataServer = metadataServerWithoutToken()) { assertNodeAttributes(Settings.EMPTY, metadataServer.metadataUri(), metadataServer.tokenUri(), "us-east-1c"); From 76d1aa04738e337bafb23fd15bfbe674ab2e38dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Wed, 17 Jan 2024 11:24:02 +0100 Subject: [PATCH 69/95] Mute failing 3rd-party-deployment test in fips mode (#104447) * Mute failing 3rd-party-deployment test in fips mode --- .../resources/rest-api-spec/test/ml/3rd_party_deployment.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index fdccf473b358a..69b676c92ed72 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -218,6 +218,9 @@ setup: --- "Test start deployment fails while model download in progress": + - skip: + features: fips_140 + reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/104414" - do: ml.put_trained_model: model_id: .elser_model_2 From 59e8aa887f54f33737ced1b11031fe76cc985948 Mon Sep 17 00:00:00 2001 From: Simon Cooper Date: Wed, 17 Jan 2024 10:48:00 +0000 Subject: [PATCH 70/95] Rename transport version constant used for 8.10 releases (#104403) --- server/src/main/java/org/elasticsearch/Build.java | 9 ++++----- .../main/java/org/elasticsearch/TransportVersions.java | 2 +- .../action/admin/cluster/stats/AnalysisStats.java | 2 +- .../admin/indices/analyze/ReloadAnalyzersRequest.java | 2 +- .../action/admin/indices/stats/CommonStats.java | 2 +- .../action/downsample/DownsampleAction.java | 4 ++-- .../org/elasticsearch/action/index/IndexRequest.java | 2 +- 
.../org/elasticsearch/action/search/SearchResponse.java | 4 ++-- .../cluster/metadata/DataStreamLifecycle.java | 2 +- .../org/elasticsearch/cluster/node/DiscoveryNode.java | 4 ++-- .../elasticsearch/index/query/MatchNoneQueryBuilder.java | 4 ++-- .../index/query/SimpleQueryStringBuilder.java | 2 +- .../elasticsearch/index/query/TermsSetQueryBuilder.java | 2 +- .../src/main/java/org/elasticsearch/tasks/TaskInfo.java | 4 ++-- .../transport/RemoteClusterPortSettings.java | 2 +- .../elasticsearch/search/query/ThrowingQueryBuilder.java | 4 ++-- .../org/elasticsearch/test/errorquery/IndexError.java | 4 ++-- .../application/EnterpriseSearchFeatureSetUsage.java | 2 +- .../xpack/core/downsample/DownsampleIndexerAction.java | 4 ++-- .../xpack/core/downsample/DownsampleShardStatus.java | 4 ++-- .../elasticsearch/xpack/core/ilm/DownsampleAction.java | 4 ++-- .../ml/action/PutTrainedModelDefinitionPartAction.java | 4 ++-- .../core/ml/action/PutTrainedModelVocabularyAction.java | 4 ++-- .../xpack/core/search/action/AsyncStatusResponse.java | 4 ++-- .../core/security/action/apikey/GetApiKeyRequest.java | 2 +- .../core/security/action/user/AuthenticateResponse.java | 2 +- .../PutTrainedModelDefinitionPartActionRequestTests.java | 2 +- .../xpack/downsample/DownsampleShardTaskParams.java | 2 +- .../xpack/application/rules/QueryRuleCriteria.java | 2 +- .../xpack/application/rules/QueryRulesetListItem.java | 2 +- .../xpack/application/rules/RuleQueryBuilder.java | 2 +- .../xpack/eql/action/EqlSearchResponse.java | 4 ++-- .../xpack/eql/action/EqlSearchResponseTests.java | 2 +- .../elasticsearch/xpack/security/authc/TokenService.java | 2 +- 34 files changed, 51 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 0b8cd149744e3..89082389c5805 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -204,8 +204,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final String flavor; - if (in.getTransportVersion().before(TransportVersions.V_8_3_0) - || in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().before(TransportVersions.V_8_3_0) || in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { flavor = in.readString(); } else { flavor = "default"; @@ -235,7 +234,7 @@ public static Build readBuild(StreamInput in) throws IOException { version = versionMatcher.group(1); qualifier = versionMatcher.group(2); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { minWireVersion = in.readString(); minIndexVersion = in.readString(); displayString = in.readString(); @@ -252,7 +251,7 @@ public static Build readBuild(StreamInput in) throws IOException { public static void writeBuild(Build build, StreamOutput out) throws IOException { if (out.getTransportVersion().before(TransportVersions.V_8_3_0) - || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(build.flavor()); } out.writeString(build.type().displayName()); @@ -266,7 +265,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeBoolean(build.isSnapshot()); out.writeString(build.qualifiedVersion()); } - if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeString(build.minWireCompatVersion()); out.writeString(build.minIndexCompatVersion()); out.writeString(build.displayString()); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index c2cd67c88edbe..531ad7d0f7ba9 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -93,7 +93,7 @@ static TransportVersion def(int id) { * Detached transport versions added below here. */ public static final TransportVersion V_8_9_X = def(8_500_020); - public static final TransportVersion V_8_500_061 = def(8_500_061); + public static final TransportVersion V_8_10_X = def(8_500_061); public static final TransportVersion V_8_500_062 = def(8_500_062); public static final TransportVersion V_8_500_063 = def(8_500_063); public static final TransportVersion V_8_500_064 = def(8_500_064); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index 81a26999d2907..9105c20044223 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -42,7 +42,7 @@ */ public final class AnalysisStats implements ToXContentFragment, Writeable { - private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_500_061; + private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_10_X; private static final Set SYNONYM_FILTER_TYPES = Set.of("synonym", "synonym_graph"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java index e2894f072011c..be33fada9c934 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java @@ -25,7 +25,7 @@ public class ReloadAnalyzersRequest extends BroadcastRequest implements DocWriteRequest, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); - private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_061; + private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_10_X; /** * Max length of the source document to include into string() diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 660fdb38b130b..a9943d7b43397 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -632,7 +632,7 @@ public Clusters(StreamInput in) throws IOException { this.total = in.readVInt(); int successfulTemp = in.readVInt(); int skippedTemp = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { List clusterList = 
in.readCollectionAsList(Cluster::new); if (clusterList.isEmpty()) { this.clusterInfo = Collections.emptyMap(); @@ -685,7 +685,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); out.writeVInt(successful); out.writeVInt(skipped); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { if (clusterInfo != null) { List clusterList = clusterInfo.values().stream().toList(); out.writeCollection(clusterList); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index a08b4e878ad9b..215ed515748ab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -48,7 +48,7 @@ public class DataStreamLifecycle implements SimpleDiffable, ToXContentObject { // Versions over the wire - public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_061; + public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_10_X; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index e77a7b27e1a2c..01b67068db31f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -337,7 +337,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); } else { versionInfo = inferVersionInformation(Version.readVersion(in)); @@ -374,7 +374,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeString(role.roleNameAbbreviation()); o.writeBoolean(role.canContainData()); }); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index 04ae0bb498841..47e4cf7273703 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -39,14 +39,14 @@ public MatchNoneQueryBuilder(String rewriteReason) { */ public MatchNoneQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { rewriteReason = in.readOptionalString(); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + 
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { out.writeOptionalString(rewriteReason); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 5a2b01838e27b..63cd598caa784 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -91,7 +91,7 @@ public final class SimpleQueryStringBuilder extends AbstractQueryBuilder instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersions.V_8_500_061)) { + if (version.before(TransportVersions.V_8_10_X)) { return new Request( instance.getModelId(), instance.getDefinition(), diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 34b7d3c90b267..813dcc8c8d5a4 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -91,7 +91,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_061; + return TransportVersions.V_8_10_X; } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java index ef42a7d7c64f2..5b07d81d90df0 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java @@ -37,7 +37,7 @@ public class QueryRuleCriteria implements Writeable, ToXContentObject { - public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_061; + public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_10_X; private final QueryRuleCriteriaType criteriaType; private final String criteriaMetadata; private final List criteriaValues; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index fcd0f6be8fbcb..f3bc07387512f 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -27,7 +27,7 @@ */ public class QueryRulesetListItem implements Writeable, ToXContentObject { - public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_061; + public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_10_X; public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); diff --git 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index b23ed92a5d9b8..3882b6c61bb2c 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -73,7 +73,7 @@ public class RuleQueryBuilder extends AbstractQueryBuilder { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_061; + return TransportVersions.V_8_10_X; } public RuleQueryBuilder(QueryBuilder organicQuery, Map matchCriteria, String rulesetId) { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index 2d7a330560fcc..f9f9238b6c4ab 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -281,7 +281,7 @@ private Event(StreamInput in) throws IOException { } else { fetchFields = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { missing = in.readBoolean(); } else { missing = index.isEmpty(); @@ -304,7 +304,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(fetchFields, StreamOutput::writeWriteable); } } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_10_X)) { // for BWC, 8.9.1+ does not have "missing" attribute, but it considers events with an empty index "" as missing events // see https://github.com/elastic/elasticsearch/pull/98130 out.writeBoolean(missing); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index c616588fe09bb..255e94d6bda34 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -289,7 +289,7 @@ private List mutateEvents(List original, TransportVersion version) e.id(), e.source(), version.onOrAfter(TransportVersions.V_7_13_0) ? e.fetchFields() : null, - version.onOrAfter(TransportVersions.V_8_500_061) ? e.missing() : e.index().isEmpty() + version.onOrAfter(TransportVersions.V_8_10_X) ? 
e.missing() : e.index().isEmpty() ) ); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 5a8b228a1145c..26f6268aaa5dc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -210,7 +210,7 @@ public class TokenService { static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; - static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_061; + static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_10_X; private static final Logger logger = LogManager.getLogger(TokenService.class); From 52aefa59ebfd287f1458005dd6653a48307b25f0 Mon Sep 17 00:00:00 2001 From: Liam Thompson <32779855+leemthompo@users.noreply.github.com> Date: Wed, 17 Jan 2024 11:50:29 +0100 Subject: [PATCH 71/95] [DOCS] Ingest processors docs improvements (#104384) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [DOCS] Categorize ingest processors on overview page, summarize use cases * Add overview info, subheading, links * Apply suggestions from review Co-authored-by: István Zoltán Szabó * Insert space --------- Co-authored-by: István Zoltán Szabó --- docs/reference/ingest/processors.asciidoc | 194 +++++++++++++++++++++- 1 file changed, 191 insertions(+), 3 deletions(-) diff --git a/docs/reference/ingest/processors.asciidoc b/docs/reference/ingest/processors.asciidoc index 4132773e3d427..8622e0b98602c 100644 --- a/docs/reference/ingest/processors.asciidoc +++ b/docs/reference/ingest/processors.asciidoc @@ -4,7 +4,15 @@ Processor reference ++++ -{es} includes several configurable processors. To get a list of available +An <> is made up of a sequence of processors that are applied to documents as they are ingested into an index. +Each processor performs a specific task, such as filtering, transforming, or enriching data. + +Each successive processor depends on the output of the previous processor, so the order of processors is important. +The modified documents are indexed into {es} after all processors are applied. + +{es} includes over 40 configurable processors. +The subpages in this section contain reference documentation for each processor. +To get a list of available processors, use the <> API. [source,console] @@ -12,11 +20,191 @@ processors, use the <> API. GET _nodes/ingest?filter_path=nodes.*.ingest.processors ---- -The pages in this section contain reference documentation for each processor. +[discrete] +[[ingest-processors-categories]] +=== Ingest processors by category + +We've categorized the available processors on this page and summarized their functions. +This will help you find the right processor for your use case. + +* <> +* <> +* <> +* <> +* <> + +[discrete] +[[ingest-process-category-data-enrichment]] +=== Data enrichment processors + +[discrete] +[[ingest-process-category-data-enrichment-general]] +==== General outcomes + +<>:: +Appends a value to a field. + +<>:: +Points documents to the right time-based index based on a date or timestamp field. 
+ +<>:: +Enriches documents with data from another index. +[TIP] +==== +Refer to <> for detailed examples of how to use the `enrich` processor to add data from your existing indices to incoming documents during ingest. +==== + +<>:: +Uses {ml} to classify and tag text fields. + +[discrete] +[[ingest-process-category-data-enrichment-specific]] +==== Specific outcomes + +<>:: +Parses and indexes binary data, such as PDFs and Word documents. + +<>:: +Converts a location field to a Geo-Point field. + +<>:: +Computes the Community ID for network flow data. + +<>:: +Computes a hash of the document’s content. + +<>:: +Converts geo-grid definitions of grid tiles or cells to regular bounding boxes or polygons which describe their shape. + +<>:: +Adds information about the geographical location of an IPv4 or IPv6 address. + +<>:: +Calculates the network direction given a source IP address, destination IP address, and a list of internal networks. + +<>:: +Extracts the registered domain (also known as the effective top-level domain or eTLD), sub-domain, and top-level domain from a fully qualified domain name (FQDN). + +<>:: +Sets user-related details (such as `username`, `roles`, `email`, `full_name`, `metadata`, `api_key`, `realm` and `authentication_type`) from the current authenticated user to the current document by pre-processing the ingest. + +<>:: +Parses a Uniform Resource Identifier (URI) string and extracts its components as an object. + +<>:: +URL-decodes a string. + +<>:: +Parses user-agent strings to extract information about web clients. + +[discrete] +[[ingest-process-category-data-transformation]] +=== Data transformation processors + +[discrete] +[[ingest-process-category-data-transformation-general]] +==== General outcomes + +<>:: +Converts a field in the currently ingested document to a different type, such as converting a string to an integer. + +<>:: +Extracts structured fields out of a single text field within a document. +Unlike the <>, dissect does not use regular expressions. +This makes dissect a simpler and often faster alternative. + +<>:: +Extracts structured fields out of a single text field within a document, using the <> regular expression dialect that supports reusable aliased expressions. + +<>:: +Converts a string field by applying a regular expression and a replacement. + +<>:: +Uses the <> rules engine to obscure text in the input document matching the given Grok patterns. + +<>:: +Renames an existing field. + +<>:: +Sets a value on a field. + +[discrete] +[[ingest-process-category-data-transformation-specific]] +==== Specific outcomes + +<>:: +Converts a human-readable byte value to its value in bytes (for example `1kb` becomes `1024`). + +<>:: +Extracts a single line of CSV data from a text field. + +<>:: +Extracts and converts date fields. + +<> processor:: +Expands a field with dots into an object field. + +<>:: +Removes HTML tags from a field. + +<>:: +Joins each element of an array into a single string using a separator character between each element. + +<>:: +Parses messages (or specific event fields) containing key-value pairs. + +<> and <>:: +Converts a string field to lowercase or uppercase. + +<>:: +Splits a field into an array of values. + +<>:: +Trims whitespace from a field. + +[discrete] +[[ingest-process-category-data-filtering]] +=== Data filtering processors + +<>:: +Drops the document without raising any errors. + +<>:: +Removes fields from documents.
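For a concrete sense of how processors from these categories chain together, here is a minimal sketch of a pipeline that tags, normalizes, and then filters a document; the pipeline id and the field names are illustrative assumptions, not something this patch defines:

[source,console]
----
PUT _ingest/pipeline/example-pipeline
{
  "description": "Tag the event, normalize the message, drop a scratch field",
  "processors": [
    { "set": { "field": "event.category", "value": "web" } },
    { "lowercase": { "field": "message" } },
    { "remove": { "field": "tmp", "ignore_missing": true } }
  ]
}
----

Each document sent through this pipeline passes the three processors in order, matching the sequencing described in the overview above.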
+ +[discrete] +[[ingest-process-category-pipeline-handling]] +=== Pipeline handling processors + +<>:: +Raises an exception. Useful for when you expect a pipeline to fail and want to relay a specific message to the requester. + +<>:: +Executes another pipeline. + +<>:: +Reroutes documents to another target index or data stream. + +[discrete] +[[ingest-process-category-array-json-handling]] +=== Array/JSON handling processors + +<>:: +Runs an ingest processor on each element of an array or object. + +<>:: +Converts a JSON string into a structured JSON object. + +<>:: +Runs an inline or stored <> on incoming documents. +The script runs in the {painless}/painless-ingest-processor-context.html[painless `ingest` context]. + +<>:: +Sorts the elements of an array in ascending or descending order. [discrete] [[ingest-process-plugins]] -=== Processor plugins +=== Add additional processors You can install additional processors as {plugins}/ingest.html[plugins]. From a185c2f77f37b0de445f5539d32f9cfa34c2a159 Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Wed, 17 Jan 2024 13:23:14 +0200 Subject: [PATCH 72/95] Support patch transport version from 8.12 (#104406) * Downsampling supports date_histogram with tz This comes with caveats, for downsampled indexes at intervals more than 15 minutes. For instance, - 1-hour downsampling will produce inaccurate results for 1-hour histograms on timezones shifted by XX:30 - 1-day downsampling will produce inaccurate daily histograms for not-UTC timezones as it tracks days at UTC. Related to #101309 * Update docs/changelog/103511.yaml * test daylight savings * update documentation * Offset time buckets over downsampled data with TZ * Update docs/changelog/103511.yaml * check for TSDS * fixme for transport version * add interval to index metadata * add transport version * bump up transport version * address feedbcak * spotless fix * Support patch transport version from 8.12 * Update docs/changelog/104406.yaml --- docs/changelog/104406.yaml | 5 +++++ .../java/org/elasticsearch/TransportVersions.java | 1 + .../bucket/histogram/InternalDateHistogram.java | 13 +++++++++++-- 3 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/104406.yaml diff --git a/docs/changelog/104406.yaml b/docs/changelog/104406.yaml new file mode 100644 index 0000000000000..d26ef664abc07 --- /dev/null +++ b/docs/changelog/104406.yaml @@ -0,0 +1,5 @@ +pr: 104406 +summary: Support patch transport version from 8.12 +area: Downsampling +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 531ad7d0f7ba9..fb83ecd51d59f 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -170,6 +170,7 @@ static TransportVersion def(int id) { public static final TransportVersion MISSED_INDICES_UPDATE_EXCEPTION_ADDED = def(8_558_00_0); public static final TransportVersion INFERENCE_SERVICE_EMBEDDING_SIZE_ADDED = def(8_559_00_0); public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); + public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ_8_12_PATCH = def(8_560_00_1); public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); public static final TransportVersion TEXT_EXPANSION_TOKEN_PRUNING_CONFIG_ADDED = def(8_562_00_0); public static final 
TransportVersion ESQL_ASYNC_QUERY = def(8_563_00_0); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 449326b1d69bb..41b40755dc6e1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -232,6 +233,14 @@ public int hashCode() { this.downsampledResultsOffset = downsampledResultsOffset; } + boolean versionSupportsDownsamplingTimezone(TransportVersion version) { + return version.onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ) + || version.between( + TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ_8_12_PATCH, + TransportVersions.NODE_STATS_REQUEST_SIMPLIFIED + ); + } + /** * Stream from a stream. */ @@ -247,7 +256,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + if (versionSupportsDownsamplingTimezone(in.getTransportVersion())) { downsampledResultsOffset = in.readBoolean(); } else { downsampledResultsOffset = false; @@ -265,7 +274,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); - if (out.getTransportVersion().onOrAfter(TransportVersions.DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ)) { + if (versionSupportsDownsamplingTimezone(out.getTransportVersion())) { out.writeBoolean(downsampledResultsOffset); } out.writeCollection(buckets); From de992c278d793d800d0ec85c91c1a98f58c10cca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Wed, 17 Jan 2024 12:52:54 +0100 Subject: [PATCH 73/95] Add Query Users API (#104033) * Add query users API --- docs/changelog/104033.yaml | 5 + .../core/security/action/ActionTypes.java | 3 + .../action/user/QueryUserRequest.java | 99 ++++ .../action/user/QueryUserResponse.java | 94 ++++ .../privilege/ClusterPrivilegeResolver.java | 2 + .../action/user/QueryUserRequestTests.java | 43 ++ .../authz/privilege/PrivilegeTests.java | 9 +- .../xpack/security/operator/Constants.java | 1 + .../xpack/security/QueryUserIT.java | 490 ++++++++++++++++++ .../security/SecurityInBasicRestTestCase.java | 4 + .../src/javaRestTest/resources/roles.yml | 5 + .../xpack/security/Security.java | 4 + .../action/user/TransportQueryUserAction.java | 107 ++++ .../authc/esnative/NativeUsersStore.java | 37 ++ .../rest/action/user/RestQueryUserAction.java | 115 ++++ .../SecurityIndexFieldNameTranslator.java | 84 +++ .../support/UserBoolQueryBuilder.java | 101 ++++ .../user/TransportQueryUserActionTests.java | 86 +++ .../action/user/RestQueryUserActionTests.java | 175 +++++++ .../support/UserBoolQueryBuilderTests.java | 221 ++++++++ 20 files changed, 1679 insertions(+), 6 deletions(-) create mode 100644 docs/changelog/104033.yaml create mode 100644 
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java create mode 100644 x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java create mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java diff --git a/docs/changelog/104033.yaml b/docs/changelog/104033.yaml new file mode 100644 index 0000000000000..d3e167665732c --- /dev/null +++ b/docs/changelog/104033.yaml @@ -0,0 +1,5 @@ +pr: 104033 +summary: Add Query Users API +area: Security +type: enhancement +issues: [] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java index fbc08a0dee8aa..bdb721df2ffd9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; /** * A collection of actions types for the Security plugin that need to be available in xpack.core.security and thus cannot be stored @@ -20,4 +21,6 @@ public final class ActionTypes { public static final ActionType RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION = ActionType.localOnly( "cluster:admin/xpack/security/remote_cluster_credentials/reload" ); + + public static final ActionType QUERY_USER_ACTION = ActionType.localOnly("cluster:admin/xpack/security/user/query"); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java new file mode 100644 index 0000000000000..6db7e93b66eda --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequest.java @@ -0,0 +1,99 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +/** + * Request for the query Users API.
+ * Model for API requests to the query users API + */ +public final class QueryUserRequest extends ActionRequest { + + @Nullable + private final QueryBuilder queryBuilder; + @Nullable + private final Integer from; + @Nullable + private final Integer size; + @Nullable + private final List fieldSortBuilders; + @Nullable + private final SearchAfterBuilder searchAfterBuilder; + + public QueryUserRequest() { + this(null); + } + + public QueryUserRequest(QueryBuilder queryBuilder) { + this(queryBuilder, null, null, null, null); + } + + public QueryUserRequest( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) { + this.queryBuilder = queryBuilder; + this.from = from; + this.size = size; + this.fieldSortBuilders = fieldSortBuilders; + this.searchAfterBuilder = searchAfterBuilder; + } + + public QueryBuilder getQueryBuilder() { + return queryBuilder; + } + + public Integer getFrom() { + return from; + } + + public Integer getSize() { + return size; + } + + public List getFieldSortBuilders() { + return fieldSortBuilders; + } + + public SearchAfterBuilder getSearchAfterBuilder() { + return searchAfterBuilder; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (from != null && from < 0) { + validationException = addValidationError("[from] parameter cannot be negative but was [" + from + "]", validationException); + } + if (size != null && size < 0) { + validationException = addValidationError("[size] parameter cannot be negative but was [" + size + "]", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java new file mode 100644 index 0000000000000..57d156cf05ca0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/QueryUserResponse.java @@ -0,0 +1,94 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Objects; + +/** + * Response for the query Users API.
    + * Model used to serialize information about the Users that were found. + */ +public final class QueryUserResponse extends ActionResponse implements ToXContentObject { + + private final long total; + private final Item[] items; + + public QueryUserResponse(long total, Collection items) { + this.total = total; + Objects.requireNonNull(items, "items must be provided"); + this.items = items.toArray(new Item[0]); + } + + public static QueryUserResponse emptyResponse() { + return new QueryUserResponse(0, Collections.emptyList()); + } + + public long getTotal() { + return total; + } + + public Item[] getItems() { + return items; + } + + public int getCount() { + return items.length; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("total", total).field("count", items.length).array("users", (Object[]) items); + return builder.endObject(); + } + + @Override + public String toString() { + return "QueryUsersResponse{" + "total=" + total + ", items=" + Arrays.toString(items) + '}'; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + + public record Item(User user, @Nullable Object[] sortValues) implements ToXContentObject { + + @Override + public Object[] sortValues() { + return sortValues; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + user.innerToXContent(builder); + if (sortValues != null && sortValues.length > 0) { + builder.array("_sort", sortValues); + } + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return "Item{" + "user=" + user + ", sortValues=" + Arrays.toString(sortValues) + '}'; + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java index f93599cdb98cc..ba6bca802070a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilegeResolver.java @@ -29,6 +29,7 @@ import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import org.elasticsearch.xpack.core.ilm.action.GetStatusAction; import org.elasticsearch.xpack.core.ilm.action.ILMActions; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction; import org.elasticsearch.xpack.core.security.action.apikey.GrantApiKeyAction; @@ -234,6 +235,7 @@ public class ClusterPrivilegeResolver { GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME + "*", GetUsersAction.NAME, + ActionTypes.QUERY_USER_ACTION.name(), GetUserPrivilegesAction.NAME, // normally authorized under the "same-user" authz check, but added here for uniformity HasPrivilegesAction.NAME, GetSecuritySettingsAction.NAME diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java new file mode 100644 index 0000000000000..e7d8ef0b65e39 --- /dev/null +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/QueryUserRequestTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.core.security.action.user; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.nullValue; + +public class QueryUserRequestTests extends ESTestCase { + public void testValidate() { + final QueryUserRequest request1 = new QueryUserRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request1.validate(), nullValue()); + + final QueryUserRequest request2 = new QueryUserRequest( + null, + randomIntBetween(Integer.MIN_VALUE, -1), + randomIntBetween(0, Integer.MAX_VALUE), + null, + null + ); + assertThat(request2.validate().getMessage(), containsString("[from] parameter cannot be negative")); + + final QueryUserRequest request3 = new QueryUserRequest( + null, + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(Integer.MIN_VALUE, -1), + null, + null + ); + assertThat(request3.validate().getMessage(), containsString("[size] parameter cannot be negative")); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index bddc30b8d7b83..21827c4b9a373 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.GetEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; @@ -281,6 +282,7 @@ public void testReadSecurityPrivilege() { GetServiceAccountAction.NAME, GetServiceAccountCredentialsAction.NAME, GetUsersAction.NAME, + ActionTypes.QUERY_USER_ACTION.name(), HasPrivilegesAction.NAME, GetUserPrivilegesAction.NAME, GetSecuritySettingsAction.NAME @@ -339,16 +341,11 @@ public void testManageUserProfilePrivilege() { "cluster:admin/xpack/security/role/get", "cluster:admin/xpack/security/role/delete" ); - verifyClusterActionDenied( - ClusterPrivilegeResolver.MANAGE_USER_PROFILE, - "cluster:admin/xpack/security/role/put", - "cluster:admin/xpack/security/role/get", - "cluster:admin/xpack/security/role/delete" - ); verifyClusterActionDenied( ClusterPrivilegeResolver.MANAGE_USER_PROFILE, "cluster:admin/xpack/security/user/put", "cluster:admin/xpack/security/user/get", + "cluster:admin/xpack/security/user/query", "cluster:admin/xpack/security/user/delete" ); verifyClusterActionDenied( diff --git 
a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index cab0c2bff28f0..453f489240f77 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -282,6 +282,7 @@ public class Constants { "cluster:admin/xpack/security/user/change_password", "cluster:admin/xpack/security/user/delete", "cluster:admin/xpack/security/user/get", + "cluster:admin/xpack/security/user/query", "cluster:admin/xpack/security/user/has_privileges", "cluster:admin/xpack/security/user/list_privileges", "cluster:admin/xpack/security/user/put", diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java new file mode 100644 index 0000000000000..8e6290163efcd --- /dev/null +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/QueryUserIT.java @@ -0,0 +1,490 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.apache.http.HttpHeaders; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.security.user.User; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; + +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNot.not; + +public class QueryUserIT extends SecurityInBasicRestTestCase { + + private static final String READ_USERS_USER_AUTH_HEADER = "Basic cmVhZF91c2Vyc191c2VyOnJlYWQtdXNlcnMtcGFzc3dvcmQ="; + private static final String TEST_USER_NO_READ_USERS_AUTH_HEADER = "Basic c2VjdXJpdHlfdGVzdF91c2VyOnNlY3VyaXR5LXRlc3QtcGFzc3dvcmQ="; + + private static final Set reservedUsers = Set.of( + "elastic", + "kibana", + "kibana_system", + "logstash_system", + "beats_system", + "apm_system", + "remote_monitoring_user" + ); + + private Request queryUserRequestWithAuth() { + final Request request = new Request(randomFrom("POST", "GET"), "/_security/_query/user"); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + return request; + } + + public void testQuery() throws IOException { + // No users to match yet + assertQuery("", users -> 
assertThat(users, empty())); + + int randomUserCount = createRandomUsers().size(); + + // An empty request body means search for all users (page size = 10) + assertQuery("", users -> assertThat(users.size(), equalTo(Math.min(randomUserCount, 10)))); + + // Match all + assertQuery( + String.format(""" + {"query":{"match_all":{}},"from":0,"size":%s}""", randomUserCount), + users -> assertThat(users.size(), equalTo(randomUserCount)) + ); + + // Exists query + String field = randomFrom("username", "full_name", "roles", "enabled"); + assertQuery( + String.format(""" + {"query":{"exists":{"field":"%s"}},"from":0,"size":%s}""", field, randomUserCount), + users -> assertEquals(users.size(), randomUserCount) + ); + + // Prefix search + User prefixUser1 = createUser( + "mr-prefix1", + new String[] { "master-of-the-universe", "some-other-role" }, + "Prefix1", + "email@something.com", + Map.of(), + true + ); + User prefixUser2 = createUser( + "mr-prefix2", + new String[] { "master-of-the-world", "some-other-role" }, + "Prefix2", + "email@something.com", + Map.of(), + true + ); + assertQuery(""" + {"query":{"bool":{"must":[{"prefix":{"roles":"master-of-the"}}]}}}""", returnedUsers -> { + assertThat(returnedUsers, hasSize(2)); + assertUser(prefixUser1, returnedUsers.get(0)); + assertUser(prefixUser2, returnedUsers.get(1)); + }); + + // Wildcard search + assertQuery(""" + { "query": { "wildcard": {"username": "mr-prefix*"} } }""", users -> { + assertThat(users.size(), equalTo(2)); + assertUser(prefixUser1, users.get(0)); + assertUser(prefixUser2, users.get(1)); + users.forEach(k -> assertThat(k, not(hasKey("_sort")))); + }); + + // Terms query + assertQuery(""" + {"query":{"terms":{"roles":["some-other-role"]}}}""", users -> { + assertThat(users.size(), equalTo(2)); + assertUser(prefixUser1, users.get(0)); + assertUser(prefixUser2, users.get(1)); + }); + + // Test other fields + User otherFieldsTestUser = createUser( + "batman-official-user", + new String[] { "bat-cave-admin" }, + "Batman", + "batman@hotmail.com", + Map.of(), + true + ); + String enabledTerm = "\"enabled\":true"; + String fullNameTerm = "\"full_name\":\"batman\""; + String emailTerm = "\"email\":\"batman@hotmail.com\""; + + final String term = randomFrom(enabledTerm, fullNameTerm, emailTerm); + assertQuery( + Strings.format(""" + {"query":{"term":{%s}},"size":100}""", term), + users -> assertThat( + users.stream().map(u -> u.get(User.Fields.USERNAME.getPreferredName()).toString()).toList(), + hasItem("batman-official-user") + ) + ); + + // Test complex query + assertQuery(""" + { "query": {"bool": {"must": [ + {"wildcard": {"username": "batman-official*"}}, + {"term": {"enabled": true}}],"filter": [{"prefix": {"roles": "bat-cave"}}]}}}""", users -> { + assertThat(users.size(), equalTo(1)); + assertUser(otherFieldsTestUser, users.get(0)); + }); + + // Search for fields outside the allowlist fails + assertQueryError(400, """ + { "query": { "prefix": {"not_allowed": "ABC"} } }"""); + + // Search for fields that are not allowed in Query DSL but used internally by the service itself + final String fieldName = randomFrom("type", "password"); + assertQueryError(400, Strings.format(""" + { "query": { "term": {"%s": "%s"} } }""", fieldName, randomAlphaOfLengthBetween(3, 8))); + + // User without read_security gets 403 trying to search Users + assertQueryError(TEST_USER_NO_READ_USERS_AUTH_HEADER, 403, """ + { "query": { "wildcard": {"name": "*prefix*"} } }"""); + + // Range query not supported + assertQueryError(400, """ + 
{"query":{"range":{"username":{"lt":"now"}}}}"""); + + // IDs query not supported + assertQueryError(400, """ + { "query": { "ids": { "values": "abc" } } }"""); + + // Make sure we can't query reserved users + String reservedUsername = getReservedUsernameAndAssertExists(); + assertQuery(String.format(""" + {"query":{"term":{"username":"%s"}}}""", reservedUsername), users -> assertTrue(users.isEmpty())); + } + + public void testPagination() throws IOException { + final List users = createRandomUsers(); + + final int from = randomIntBetween(0, 3); + final int size = randomIntBetween(2, 5); + final int remaining = users.size() - from; + + // Using string only sorting to simplify test + final String sortField = "username"; + final List> allUserInfos = new ArrayList<>(remaining); + { + Request request = queryUserRequestWithAuth(); + request.setJsonEntity("{\"from\":" + from + ",\"size\":" + size + ",\"sort\":[\"" + sortField + "\"]}"); + allUserInfos.addAll(collectUsers(request, users.size())); + } + // first batch should be a full page + assertThat(allUserInfos.size(), equalTo(size)); + + while (allUserInfos.size() < remaining) { + final Request request = queryUserRequestWithAuth(); + final List sortValues = extractSortValues(allUserInfos.get(allUserInfos.size() - 1)); + + request.setJsonEntity(Strings.format(""" + {"size":%s,"sort":["%s"],"search_after":["%s"]} + """, size, sortField, sortValues.get(0))); + final List> userInfoPage = collectUsers(request, users.size()); + + if (userInfoPage.isEmpty() && allUserInfos.size() < remaining) { + fail("fail to retrieve all Users, expect [" + remaining + "], got [" + allUserInfos + "]"); + } + allUserInfos.addAll(userInfoPage); + + // Before all users are retrieved, each page should be a full page + if (allUserInfos.size() < remaining) { + assertThat(userInfoPage.size(), equalTo(size)); + } + } + + // Assert sort values match the field of User information + assertThat( + allUserInfos.stream().map(m -> m.get(sortField)).toList(), + equalTo(allUserInfos.stream().map(m -> extractSortValues(m).get(0)).toList()) + ); + + // Assert that all users match the created users and that they're sorted correctly + assertUsers(users, allUserInfos, sortField, from); + + // size can be zero, but total should still reflect the number of users matched + final Request request = queryUserRequestWithAuth(); + request.setJsonEntity("{\"size\":0}"); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + assertThat(responseMap.get("total"), equalTo(users.size())); + assertThat(responseMap.get("count"), equalTo(0)); + } + + @SuppressWarnings("unchecked") + public void testSort() throws IOException { + final List testUsers = List.of( + createUser("a", new String[] { "4", "5", "6" }), + createUser("b", new String[] { "5", "6" }), + createUser("c", new String[] { "7", "8" }) + ); + assertQuery(""" + {"sort":[{"username":{"order":"desc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 2, j = 0; i >= 0; i--, j++) { + assertUser(testUsers.get(j), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + assertQuery(""" + {"sort":[{"username":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + 
assertQuery(""" + {"sort":[{"roles":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + // Only first element of array is used for sorting + assertThat(((List) users.get(i).get("roles")).get(0), equalTo(((List) users.get(i).get("_sort")).get(0))); + } + }); + + // Make sure sorting on _doc works + assertQuery(""" + {"sort":["_doc"]}""", users -> assertThat(users.size(), equalTo(3))); + + // Make sure multi-field sorting works + assertQuery(""" + {"sort":[{"username":{"order":"asc"}}, {"roles":{"order":"asc"}}]}""", users -> { + assertThat(users.size(), equalTo(3)); + for (int i = 0; i <= 2; i++) { + assertUser(testUsers.get(i), users.get(i)); + assertThat(users.get(i).get("username"), equalTo(((List) users.get(i).get("_sort")).get(0))); + assertThat(((List) users.get(i).get("roles")).get(0), equalTo(((List) users.get(i).get("_sort")).get(1))); + } + }); + + final String invalidFieldName = randomFrom("doc_type", "invalid", "password"); + assertQueryError(400, "{\"sort\":[\"" + invalidFieldName + "\"]}"); + + final String invalidSortName = randomFrom("email", "full_name"); + assertQueryError( + READ_USERS_USER_AUTH_HEADER, + 400, + String.format("{\"sort\":[\"%s\"]}", invalidSortName), + String.format("sorting is not supported for field [%s] in User query", invalidSortName) + ); + } + + private String getReservedUsernameAndAssertExists() throws IOException { + String username = randomFrom(reservedUsers); + final Request request = new Request("GET", "/_security/user"); + + if (randomBoolean()) { + // Update the user to create it in the security index + Request putUserRequest = new Request("PUT", "/_security/user/" + username); + putUserRequest.setJsonEntity("{\"enabled\": true}"); + } + + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, READ_USERS_USER_AUTH_HEADER)); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + assertNotNull(responseMap.get(username)); + return username; + } + + @SuppressWarnings("unchecked") + private List extractSortValues(Map userInfo) { + return (List) userInfo.get("_sort"); + } + + private List> collectUsers(Request request, int total) throws IOException { + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + @SuppressWarnings("unchecked") + final List> userInfos = (List>) responseMap.get("users"); + assertThat(responseMap.get("total"), equalTo(total)); + assertThat(responseMap.get("count"), equalTo(userInfos.size())); + return userInfos; + } + + private void assertQueryError(int statusCode, String body) { + assertQueryError(READ_USERS_USER_AUTH_HEADER, statusCode, body); + } + + private void assertQueryError(String authHeader, int statusCode, String body) { + assertQueryError(authHeader, statusCode, body, null); + } + + private void assertQueryError(String authHeader, int statusCode, String body, String errorMessage) { + final Request request = new Request(randomFrom("GET", "POST"), "/_security/_query/user"); + request.setJsonEntity(body); + request.setOptions(request.getOptions().toBuilder().addHeader(HttpHeaders.AUTHORIZATION, authHeader)); + final ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), 
equalTo(statusCode)); + if (errorMessage != null) { + assertTrue(responseException.getMessage().contains(errorMessage)); + } + } + + private void assertQuery(String body, Consumer>> userVerifier) throws IOException { + final Request request = queryUserRequestWithAuth(); + request.setJsonEntity(body); + final Response response = client().performRequest(request); + assertOK(response); + final Map responseMap = responseAsMap(response); + @SuppressWarnings("unchecked") + final List> users = (List>) responseMap.get("users"); + userVerifier.accept(users); + } + + private void assertUser(User expectedUser, Map actualUser) { + assertUser(userToMap(expectedUser), actualUser); + } + + @SuppressWarnings("unchecked") + private void assertUser(Map expectedUser, Map actualUser) { + assertEquals(expectedUser.get(User.Fields.USERNAME.getPreferredName()), actualUser.get(User.Fields.USERNAME.getPreferredName())); + assertArrayEquals( + ((List) expectedUser.get(User.Fields.ROLES.getPreferredName())).toArray(), + ((List) actualUser.get(User.Fields.ROLES.getPreferredName())).toArray() + ); + assertEquals(expectedUser.get(User.Fields.FULL_NAME.getPreferredName()), actualUser.get(User.Fields.FULL_NAME.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.EMAIL.getPreferredName()), actualUser.get(User.Fields.EMAIL.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.METADATA.getPreferredName()), actualUser.get(User.Fields.METADATA.getPreferredName())); + assertEquals(expectedUser.get(User.Fields.ENABLED.getPreferredName()), actualUser.get(User.Fields.ENABLED.getPreferredName())); + } + + private Map userToMap(User user) { + return Map.of( + User.Fields.USERNAME.getPreferredName(), + user.principal(), + User.Fields.ROLES.getPreferredName(), + Arrays.stream(user.roles()).toList(), + User.Fields.FULL_NAME.getPreferredName(), + user.fullName(), + User.Fields.EMAIL.getPreferredName(), + user.email(), + User.Fields.METADATA.getPreferredName(), + user.metadata(), + User.Fields.ENABLED.getPreferredName(), + user.enabled() + ); + } + + private void assertUsers(List expectedUsers, List> actualUsers, String sortField, int from) { + assertEquals(expectedUsers.size() - from, actualUsers.size()); + + List> sortedExpectedUsers = expectedUsers.stream() + .map(this::userToMap) + .sorted(Comparator.comparing(user -> user.get(sortField).toString())) + .toList(); + + for (int i = from; i < sortedExpectedUsers.size(); i++) { + assertUser(sortedExpectedUsers.get(i), actualUsers.get(i - from)); + } + } + + public static Map randomUserMetadata() { + return ESTestCase.randomFrom( + Map.of( + "employee_id", + ESTestCase.randomAlphaOfLength(5), + "number", + 1, + "numbers", + List.of(1, 3, 5), + "extra", + Map.of("favorite pizza", "margherita", "age", 42) + ), + Map.of(ESTestCase.randomAlphaOfLengthBetween(3, 8), ESTestCase.randomAlphaOfLengthBetween(3, 8)), + Map.of(), + null + ); + } + + private List createRandomUsers() throws IOException { + int randomUserCount = randomIntBetween(8, 15); + final List users = new ArrayList<>(randomUserCount); + + for (int i = 0; i < randomUserCount; i++) { + users.add( + createUser( + randomValueOtherThanMany(reservedUsers::contains, () -> randomAlphaOfLengthBetween(3, 8)) + "-" + i, + randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8)), + randomAlphaOfLengthBetween(3, 8), + randomAlphaOfLengthBetween(3, 8), + randomUserMetadata(), + randomBoolean() + ) + ); + } + + return users; + } + + private User createUser(String userName, String[] roles) throws 
IOException { + return createUser( + userName, + roles, + randomAlphaOfLengthBetween(3, 8), + randomAlphaOfLengthBetween(3, 8), + randomUserMetadata(), + randomBoolean() + ); + } + + private User createUser(String userName, String[] roles, String fullName, String email, Map metadata, boolean enabled) + throws IOException { + + final Request request = new Request("POST", "/_security/user/" + userName); + BytesReference source = BytesReference.bytes( + jsonBuilder().map( + Map.of( + User.Fields.USERNAME.getPreferredName(), + userName, + User.Fields.ROLES.getPreferredName(), + roles, + User.Fields.FULL_NAME.getPreferredName(), + fullName, + User.Fields.EMAIL.getPreferredName(), + email, + User.Fields.METADATA.getPreferredName(), + metadata == null ? Map.of() : metadata, + User.Fields.PASSWORD.getPreferredName(), + "100%-security-guaranteed", + User.Fields.ENABLED.getPreferredName(), + enabled + ) + ) + ); + request.setJsonEntity(source.utf8ToString()); + Response response = adminClient().performRequest(request); + assertOK(response); + assertTrue((boolean) responseAsMap(response).get("created")); + return new User(userName, roles, fullName, email, metadata, enabled); + } +} diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java index 5843350e36457..587cc4643514c 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java @@ -22,6 +22,9 @@ public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { protected static final String REST_USER = "security_test_user"; private static final SecureString REST_PASSWORD = new SecureString("security-test-password".toCharArray()); + protected static final String READ_USERS_USER = "read_users_user"; + private static final SecureString READ_USERS_PASSWORD = new SecureString("read-users-password".toCharArray()); + private static final String ADMIN_USER = "admin_user"; private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray()); @@ -47,6 +50,7 @@ public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { .user(REST_USER, REST_PASSWORD.toString(), "security_test_role", false) .user(API_KEY_USER, API_KEY_USER_PASSWORD.toString(), "api_key_user_role", false) .user(API_KEY_ADMIN_USER, API_KEY_ADMIN_USER_PASSWORD.toString(), "api_key_admin_role", false) + .user(READ_USERS_USER, READ_USERS_PASSWORD.toString(), "read_users_user_role", false) .build(); @Override diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml index 47f1c05ffaaf8..15c291274bcdb 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/resources/roles.yml @@ -18,6 +18,11 @@ api_key_user_role: cluster: - manage_own_api_key +# Used to perform query user operations +read_users_user_role: + cluster: + - read_security + # Role with remote indices privileges role_remote_indices: remote_indices: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index c6b441d9cc04f..b6c6ea60d869f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -260,6 +260,7 @@ import org.elasticsearch.xpack.security.action.user.TransportGetUsersAction; import org.elasticsearch.xpack.security.action.user.TransportHasPrivilegesAction; import org.elasticsearch.xpack.security.action.user.TransportPutUserAction; +import org.elasticsearch.xpack.security.action.user.TransportQueryUserAction; import org.elasticsearch.xpack.security.action.user.TransportSetEnabledAction; import org.elasticsearch.xpack.security.audit.AuditTrail; import org.elasticsearch.xpack.security.audit.AuditTrailService; @@ -365,6 +366,7 @@ import org.elasticsearch.xpack.security.rest.action.user.RestHasPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.user.RestProfileHasPrivilegesAction; import org.elasticsearch.xpack.security.rest.action.user.RestPutUserAction; +import org.elasticsearch.xpack.security.rest.action.user.RestQueryUserAction; import org.elasticsearch.xpack.security.rest.action.user.RestSetEnabledAction; import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.ExtensionComponents; @@ -1315,6 +1317,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(ClearPrivilegesCacheAction.INSTANCE, TransportClearPrivilegesCacheAction.class), new ActionHandler<>(ClearSecurityCacheAction.INSTANCE, TransportClearSecurityCacheAction.class), new ActionHandler<>(GetUsersAction.INSTANCE, TransportGetUsersAction.class), + new ActionHandler<>(ActionTypes.QUERY_USER_ACTION, TransportQueryUserAction.class), new ActionHandler<>(PutUserAction.INSTANCE, TransportPutUserAction.class), new ActionHandler<>(DeleteUserAction.INSTANCE, TransportDeleteUserAction.class), new ActionHandler<>(GetRolesAction.INSTANCE, TransportGetRolesAction.class), @@ -1406,6 +1409,7 @@ public List getRestHandlers( new RestClearApiKeyCacheAction(settings, getLicenseState()), new RestClearServiceAccountTokenStoreCacheAction(settings, getLicenseState()), new RestGetUsersAction(settings, getLicenseState()), + new RestQueryUserAction(settings, getLicenseState()), new RestPutUserAction(settings, getLicenseState()), new RestDeleteUserAction(settings, getLicenseState()), new RestGetRolesAction(settings, getLicenseState()), diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java new file mode 100644 index 0000000000000..2a9aef73ff62a --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserAction.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.user; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; +import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; +import org.elasticsearch.xpack.security.support.UserBoolQueryBuilder; + +import java.util.List; +import java.util.Locale; +import java.util.Set; + +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.elasticsearch.xpack.security.support.UserBoolQueryBuilder.USER_FIELD_NAME_TRANSLATOR; + +public final class TransportQueryUserAction extends TransportAction { + private final NativeUsersStore usersStore; + private static final Set FIELD_NAMES_WITH_SORT_SUPPORT = Set.of("username", "roles", "enabled"); + + @Inject + public TransportQueryUserAction(TransportService transportService, ActionFilters actionFilters, NativeUsersStore usersStore) { + super(ActionTypes.QUERY_USER_ACTION.name(), actionFilters, transportService.getTaskManager()); + this.usersStore = usersStore; + } + + @Override + protected void doExecute(Task task, QueryUserRequest request, ActionListener listener) { + final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource() + .version(false) + .fetchSource(true) + .trackTotalHits(true); + + if (request.getFrom() != null) { + searchSourceBuilder.from(request.getFrom()); + } + if (request.getSize() != null) { + searchSourceBuilder.size(request.getSize()); + } + + searchSourceBuilder.query(UserBoolQueryBuilder.build(request.getQueryBuilder())); + + if (request.getFieldSortBuilders() != null) { + translateFieldSortBuilders(request.getFieldSortBuilders(), searchSourceBuilder); + } + + if (request.getSearchAfterBuilder() != null) { + searchSourceBuilder.searchAfter(request.getSearchAfterBuilder().getSortValues()); + } + + final SearchRequest searchRequest = new SearchRequest(new String[] { SECURITY_MAIN_ALIAS }, searchSourceBuilder); + usersStore.queryUsers(searchRequest, listener); + } + + // package private for testing + static void translateFieldSortBuilders(List fieldSortBuilders, SearchSourceBuilder searchSourceBuilder) { + fieldSortBuilders.forEach(fieldSortBuilder -> { + if (fieldSortBuilder.getNestedSort() != null) { + throw new IllegalArgumentException("nested sorting is not supported for User query"); + } + if (FieldSortBuilder.DOC_FIELD_NAME.equals(fieldSortBuilder.getFieldName())) { + searchSourceBuilder.sort(fieldSortBuilder); + } else { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(fieldSortBuilder.getFieldName()); + if (FIELD_NAMES_WITH_SORT_SUPPORT.contains(translatedFieldName) == false) { + throw new IllegalArgumentException( + String.format(Locale.ROOT, "sorting is not supported for field [%s] in User query", fieldSortBuilder.getFieldName()) + ); + } + + if (translatedFieldName.equals(fieldSortBuilder.getFieldName())) { + 
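+                    // The query-level field name already matches the index field name, so the caller's sort builder can be reused as-is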
searchSourceBuilder.sort(fieldSortBuilder); + } else { + final FieldSortBuilder translatedFieldSortBuilder = new FieldSortBuilder(translatedFieldName).order( + fieldSortBuilder.order() + ) + .missing(fieldSortBuilder.missing()) + .unmappedType(fieldSortBuilder.unmappedType()) + .setFormat(fieldSortBuilder.getFormat()); + + if (fieldSortBuilder.sortMode() != null) { + translatedFieldSortBuilder.sortMode(fieldSortBuilder.sortMode()); + } + if (fieldSortBuilder.getNumericType() != null) { + translatedFieldSortBuilder.setNumericType(fieldSortBuilder.getNumericType()); + } + searchSourceBuilder.sort(translatedFieldSortBuilder); + } + } + }); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 36f78682b6bd1..81aa487f73e2c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; @@ -43,6 +44,7 @@ import org.elasticsearch.xpack.core.security.action.user.ChangePasswordRequest; import org.elasticsearch.xpack.core.security.action.user.DeleteUserRequest; import org.elasticsearch.xpack.core.security.action.user.PutUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.esnative.ClientReservedRealm; import org.elasticsearch.xpack.core.security.authc.support.Hasher; @@ -57,6 +59,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.function.Consumer; import java.util.function.Supplier; @@ -161,6 +164,40 @@ public void getUsers(String[] userNames, final ActionListener> } } + public void queryUsers(SearchRequest searchRequest, ActionListener listener) { + final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); + if (frozenSecurityIndex.indexExists() == false) { + logger.debug("security index does not exist"); + listener.onResponse(QueryUserResponse.emptyResponse()); + } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { + listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + } else { + securityIndex.checkIndexVersionThenExecute( + listener::onFailure, + () -> executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + TransportSearchAction.TYPE, + searchRequest, + ActionListener.wrap(searchResponse -> { + final long total = searchResponse.getHits().getTotalHits().value; + if (total == 0) { + logger.debug("No users found for query [{}]", searchRequest.source().query()); + listener.onResponse(QueryUserResponse.emptyResponse()); + return; + } + + final List userItem = Arrays.stream(searchResponse.getHits().getHits()).map(hit -> { + UserAndPassword userAndPassword = transformUser(hit.getId(), hit.getSourceAsMap()); + return userAndPassword != 
null ? new QueryUserResponse.Item(userAndPassword.user(), hit.getSortValues()) : null; + }).filter(Objects::nonNull).toList(); + listener.onResponse(new QueryUserResponse(total, userItem)); + }, listener::onFailure) + ) + ); + } + } + void getUserCount(final ActionListener listener) { final SecurityIndexManager frozenSecurityIndex = this.securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java new file mode 100644 index 0000000000000..407fe36fa82d3 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserAction.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.rest.action.user; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xpack.core.security.action.ActionTypes; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.security.rest.action.SecurityBaseRestHandler; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.index.query.AbstractQueryBuilder.parseTopLevelQuery; +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Rest action to search for Users + */ +public final class RestQueryUserAction extends SecurityBaseRestHandler { + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "query_user_request_payload", + a -> new Payload((QueryBuilder) a[0], (Integer) a[1], (Integer) a[2], (List) a[3], (SearchAfterBuilder) a[4]) + ); + + static { + PARSER.declareObject(optionalConstructorArg(), (p, c) -> parseTopLevelQuery(p), new ParseField("query")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("from")); + PARSER.declareInt(optionalConstructorArg(), new ParseField("size")); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_STRING) { + return new FieldSortBuilder(p.text()); + } else if (p.currentToken() == XContentParser.Token.START_OBJECT) { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, p.nextToken(), p); + final FieldSortBuilder 
fieldSortBuilder = FieldSortBuilder.fromXContent(p, p.currentName()); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, p.nextToken(), p); + return fieldSortBuilder; + } else { + throw new IllegalArgumentException("mal-formatted sort object"); + } + }, new ParseField("sort")); + PARSER.declareField( + optionalConstructorArg(), + (p, c) -> SearchAfterBuilder.fromXContent(p), + new ParseField("search_after"), + ObjectParser.ValueType.VALUE_ARRAY + ); + } + + /** + * @param settings the node's settings + * @param licenseState the license state that will be used to determine if + * security is licensed + */ + public RestQueryUserAction(Settings settings, XPackLicenseState licenseState) { + super(settings, licenseState); + } + + @Override + public List routes() { + return List.of(new Route(GET, "/_security/_query/user"), new Route(POST, "/_security/_query/user")); + } + + @Override + public String getName() { + return "xpack_security_query_user"; + } + + @Override + protected RestChannelConsumer innerPrepareRequest(final RestRequest request, final NodeClient client) throws IOException { + final QueryUserRequest queryUserRequest; + if (request.hasContentOrSourceParam()) { + final Payload payload = PARSER.parse(request.contentOrSourceParamParser(), null); + queryUserRequest = new QueryUserRequest( + payload.queryBuilder, + payload.from, + payload.size, + payload.fieldSortBuilders, + payload.searchAfterBuilder + ); + } else { + queryUserRequest = new QueryUserRequest(null, null, null, null, null); + } + return channel -> client.execute(ActionTypes.QUERY_USER_ACTION, queryUserRequest, new RestToXContentListener<>(channel)); + } + + private record Payload( + @Nullable QueryBuilder queryBuilder, + @Nullable Integer from, + @Nullable Integer size, + @Nullable List fieldSortBuilders, + @Nullable SearchAfterBuilder searchAfterBuilder + ) {} +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java new file mode 100644 index 0000000000000..291d55b7b0837 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexFieldNameTranslator.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.support; + +import java.util.List; +import java.util.function.Function; +import java.util.function.Predicate; + +public class SecurityIndexFieldNameTranslator { + + private final List fieldNameTranslators; + + public SecurityIndexFieldNameTranslator(List fieldNameTranslators) { + this.fieldNameTranslators = fieldNameTranslators; + } + + public String translate(String queryFieldName) { + for (FieldName fieldName : this.fieldNameTranslators) { + if (fieldName.supportsQueryName(queryFieldName)) { + return fieldName.indexFieldName(queryFieldName); + } + } + throw new IllegalArgumentException("Field [" + queryFieldName + "] is not allowed"); + } + + public boolean supportedIndexFieldName(String indexFieldName) { + for (FieldName fieldName : this.fieldNameTranslators) { + if (fieldName.supportsIndexName(indexFieldName)) { + return true; + } + } + return false; + } + + public static FieldName exact(String name) { + return exact(name, Function.identity()); + } + + public static FieldName exact(String name, Function translation) { + return new SecurityIndexFieldNameTranslator.ExactFieldName(name, translation); + } + + public abstract static class FieldName { + private final Function toIndexFieldName; + protected final Predicate validIndexNamePredicate; + + FieldName(Function toIndexFieldName, Predicate validIndexNamePredicate) { + this.toIndexFieldName = toIndexFieldName; + this.validIndexNamePredicate = validIndexNamePredicate; + } + + public abstract boolean supportsQueryName(String queryFieldName); + + public abstract boolean supportsIndexName(String indexFieldName); + + public String indexFieldName(String queryFieldName) { + return toIndexFieldName.apply(queryFieldName); + } + } + + private static class ExactFieldName extends FieldName { + private final String name; + + private ExactFieldName(String name, Function toIndexFieldName) { + super(toIndexFieldName, fieldName -> toIndexFieldName.apply(name).equals(fieldName)); + this.name = name; + } + + @Override + public boolean supportsQueryName(String queryFieldName) { + return queryFieldName.equals(name); + } + + @Override + public boolean supportsIndexName(String indexFieldName) { + return validIndexNamePredicate.test(indexFieldName); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java new file mode 100644 index 0000000000000..5d3824ab1f8ce --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilder.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.security.support;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.index.query.BoolQueryBuilder;
+import org.elasticsearch.index.query.ExistsQueryBuilder;
+import org.elasticsearch.index.query.MatchAllQueryBuilder;
+import org.elasticsearch.index.query.PrefixQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.query.QueryRewriteContext;
+import org.elasticsearch.index.query.SearchExecutionContext;
+import org.elasticsearch.index.query.TermQueryBuilder;
+import org.elasticsearch.index.query.TermsQueryBuilder;
+import org.elasticsearch.index.query.WildcardQueryBuilder;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.xpack.security.support.SecurityIndexFieldNameTranslator.exact;
+
+public class UserBoolQueryBuilder extends BoolQueryBuilder {
+    public static final SecurityIndexFieldNameTranslator USER_FIELD_NAME_TRANSLATOR = new SecurityIndexFieldNameTranslator(
+        List.of(exact("username"), exact("roles"), exact("full_name"), exact("email"), exact("enabled"))
+    );
+
+    private UserBoolQueryBuilder() {}
+
+    public static UserBoolQueryBuilder build(QueryBuilder queryBuilder) {
+        UserBoolQueryBuilder userQueryBuilder = new UserBoolQueryBuilder();
+        if (queryBuilder != null) {
+            QueryBuilder translatedQueryBuilder = translateToUserQueryBuilder(queryBuilder);
+            userQueryBuilder.must(translatedQueryBuilder);
+        }
+        userQueryBuilder.filter(QueryBuilders.termQuery("type", "user"));
+
+        return userQueryBuilder;
+    }
+
+    private static QueryBuilder translateToUserQueryBuilder(QueryBuilder qb) {
+        if (qb instanceof final BoolQueryBuilder query) {
+            final BoolQueryBuilder newQuery = QueryBuilders.boolQuery()
+                .minimumShouldMatch(query.minimumShouldMatch())
+                .adjustPureNegative(query.adjustPureNegative());
+            query.must().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::must);
+            query.should().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::should);
+            query.mustNot().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::mustNot);
+            query.filter().stream().map(UserBoolQueryBuilder::translateToUserQueryBuilder).forEach(newQuery::filter);
+            return newQuery;
+        } else if (qb instanceof MatchAllQueryBuilder) {
+            return qb;
+        } else if (qb instanceof final TermQueryBuilder query) {
+            final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName());
+            return QueryBuilders.termQuery(translatedFieldName, query.value()).caseInsensitive(query.caseInsensitive());
+        } else if (qb instanceof final ExistsQueryBuilder query) {
+            final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName());
+            return QueryBuilders.existsQuery(translatedFieldName);
+        } else if (qb instanceof final TermsQueryBuilder query) {
+            if (query.termsLookup() != null) {
+                throw new IllegalArgumentException("Terms query with terms lookup is not supported for User query");
+            }
+            final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName());
+            return QueryBuilders.termsQuery(translatedFieldName, query.getValues());
+        } else if (qb instanceof final PrefixQueryBuilder query) {
+            final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName());
+            return QueryBuilders.prefixQuery(translatedFieldName, query.value()).caseInsensitive(query.caseInsensitive());
+        }
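+        // The wildcard branch below mirrors the term-level translations above and additionally carries over the rewrite method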
else if (qb instanceof final WildcardQueryBuilder query) { + final String translatedFieldName = USER_FIELD_NAME_TRANSLATOR.translate(query.fieldName()); + return QueryBuilders.wildcardQuery(translatedFieldName, query.value()) + .caseInsensitive(query.caseInsensitive()) + .rewrite(query.rewrite()); + } else { + throw new IllegalArgumentException("Query type [" + qb.getName() + "] is not supported for User query"); + } + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + context.setAllowedFields(this::isIndexFieldNameAllowed); + return super.doToQuery(context); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + if (queryRewriteContext instanceof SearchExecutionContext) { + ((SearchExecutionContext) queryRewriteContext).setAllowedFields(this::isIndexFieldNameAllowed); + } + return super.doRewrite(queryRewriteContext); + } + + boolean isIndexFieldNameAllowed(String queryFieldName) { + // Type is needed to filter on user doc type + return queryFieldName.equals("type") || USER_FIELD_NAME_TRANSLATOR.supportedIndexFieldName(queryFieldName); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java new file mode 100644 index 0000000000000..aa5f935998757 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportQueryUserActionTests.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.security.action.user;
+
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.sort.FieldSortBuilder;
+import org.elasticsearch.search.sort.NestedSortBuilder;
+import org.elasticsearch.search.sort.SortMode;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.List;
+import java.util.Locale;
+import java.util.stream.IntStream;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class TransportQueryUserActionTests extends ESTestCase {
+    private static final String[] allowedIndexFieldNames = new String[] { "username", "roles", "enabled" };
+
+    public void testTranslateFieldSortBuilders() {
+        final List fieldNames = List.of(allowedIndexFieldNames);
+
+        final List originals = fieldNames.stream().map(this::randomFieldSortBuilderWithName).toList();
+
+        final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource();
+        TransportQueryUserAction.translateFieldSortBuilders(originals, searchSourceBuilder);
+
+        IntStream.range(0, originals.size()).forEach(i -> {
+            final FieldSortBuilder original = originals.get(i);
+            final FieldSortBuilder translated = (FieldSortBuilder) searchSourceBuilder.sorts().get(i);
+            assertThat(original.getFieldName(), equalTo(translated.getFieldName()));
+
+            assertThat(translated.order(), equalTo(original.order()));
+            assertThat(translated.missing(), equalTo(original.missing()));
+            assertThat(translated.unmappedType(), equalTo(original.unmappedType()));
+            assertThat(translated.getNumericType(), equalTo(original.getNumericType()));
+            assertThat(translated.getFormat(), equalTo(original.getFormat()));
+            assertThat(translated.sortMode(), equalTo(original.sortMode()));
+        });
+    }
+
+    public void testNestedSortingIsNotAllowed() {
+        final FieldSortBuilder fieldSortBuilder = new FieldSortBuilder("roles");
+        fieldSortBuilder.setNestedSort(new NestedSortBuilder("something"));
+        final IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> TransportQueryUserAction.translateFieldSortBuilders(List.of(fieldSortBuilder), SearchSourceBuilder.searchSource())
+        );
+        assertThat(e.getMessage(), equalTo("nested sorting is not supported for User query"));
+    }
+
+    public void testSortingOnTextFieldsNotAllowed() {
+        String fieldName = randomFrom("full_name", "email");
+        final List fieldNames = List.of(fieldName);
+        final List originals = fieldNames.stream().map(this::randomFieldSortBuilderWithName).toList();
+        final SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.searchSource();
+
+        final IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            () -> TransportQueryUserAction.translateFieldSortBuilders(originals, searchSourceBuilder)
+        );
+        assertThat(e.getMessage(), equalTo(String.format(Locale.ROOT, "sorting is not supported for field [%s] in User query", fieldName)));
+    }
+
+    private FieldSortBuilder randomFieldSortBuilderWithName(String name) {
+        final FieldSortBuilder fieldSortBuilder = new FieldSortBuilder(name);
+        fieldSortBuilder.order(randomBoolean() ? SortOrder.ASC : SortOrder.DESC);
+        fieldSortBuilder.setFormat(randomBoolean() ?
randomAlphaOfLengthBetween(3, 16) : null); + if (randomBoolean()) { + fieldSortBuilder.setNumericType(randomFrom("long", "double", "date", "date_nanos")); + } + if (randomBoolean()) { + fieldSortBuilder.missing(randomAlphaOfLengthBetween(3, 8)); + } + if (randomBoolean()) { + fieldSortBuilder.sortMode(randomFrom(SortMode.values())); + } + return fieldSortBuilder; + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java new file mode 100644 index 0000000000000..4a593eeb24ac6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestQueryUserActionTests.java @@ -0,0 +1,175 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.rest.action.user; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.PrefixQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.searchafter.SearchAfterBuilder; +import org.elasticsearch.search.sort.FieldSortBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.action.user.QueryUserRequest; +import org.elasticsearch.xpack.core.security.action.user.QueryUserResponse; + +import java.util.List; + +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.mockito.Mockito.mock; + +public class RestQueryUserActionTests extends ESTestCase { + + private final XPackLicenseState mockLicenseState = mock(XPackLicenseState.class); + + @Override + protected NamedXContentRegistry xContentRegistry() { + final SearchModule searchModule = new SearchModule(Settings.EMPTY, List.of()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } + + public void testQueryParsing() throws Exception { + final String query1 = """ + { + "query": { + "bool": { + "must": [ + { + "terms": { + "username": [ "bart", "homer" ] + } + } + ], + "should": [ { "prefix": { "username": "ba" } } ] + } + } + }"""; + final FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(query1), + XContentType.JSON + 
).build(); + + final SetOnce responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + try (var threadPool = createThreadPool()) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + QueryUserRequest queryUserRequest = (QueryUserRequest) request; + final QueryBuilder queryBuilder = queryUserRequest.getQueryBuilder(); + assertNotNull(queryBuilder); + assertThat(queryBuilder.getClass(), is(BoolQueryBuilder.class)); + final BoolQueryBuilder boolQueryBuilder = (BoolQueryBuilder) queryBuilder; + assertTrue(boolQueryBuilder.filter().isEmpty()); + assertTrue(boolQueryBuilder.mustNot().isEmpty()); + assertThat(boolQueryBuilder.must(), hasSize(1)); + final QueryBuilder mustQueryBuilder = boolQueryBuilder.must().get(0); + assertThat(mustQueryBuilder.getClass(), is(TermsQueryBuilder.class)); + assertThat(((TermsQueryBuilder) mustQueryBuilder).fieldName(), equalTo("username")); + assertThat(boolQueryBuilder.should(), hasSize(1)); + final QueryBuilder shouldQueryBuilder = boolQueryBuilder.should().get(0); + assertThat(shouldQueryBuilder.getClass(), is(PrefixQueryBuilder.class)); + assertThat(((PrefixQueryBuilder) shouldQueryBuilder).fieldName(), equalTo("username")); + listener.onResponse((Response) new QueryUserResponse(0, List.of())); + } + }; + final RestQueryUserAction restQueryUserAction = new RestQueryUserAction(Settings.EMPTY, mockLicenseState); + restQueryUserAction.handleRequest(restRequest, restChannel, client); + } + + assertNotNull(responseSetOnce.get()); + } + + public void testParsingSearchParameters() throws Exception { + final String requestBody = """ + { + "query": { + "match_all": {} + }, + "from": 42, + "size": 20, + "sort": [ "username", "full_name"], + "search_after": [ "bart" ] + }"""; + + final FakeRestRequest restRequest = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray(requestBody), + XContentType.JSON + ).build(); + + final SetOnce responseSetOnce = new SetOnce<>(); + final RestChannel restChannel = new AbstractRestChannel(restRequest, randomBoolean()) { + @Override + public void sendResponse(RestResponse restResponse) { + responseSetOnce.set(restResponse); + } + }; + + try (var threadPool = createThreadPool()) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { + @SuppressWarnings("unchecked") + @Override + public void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + QueryUserRequest queryUserRequest = (QueryUserRequest) request; + final QueryBuilder queryBuilder = queryUserRequest.getQueryBuilder(); + assertNotNull(queryBuilder); + assertThat(queryBuilder.getClass(), is(MatchAllQueryBuilder.class)); + assertThat(queryUserRequest.getFrom(), equalTo(42)); + assertThat(queryUserRequest.getSize(), equalTo(20)); + final List fieldSortBuilders = queryUserRequest.getFieldSortBuilders(); + assertThat(fieldSortBuilders, hasSize(2)); + + assertThat(fieldSortBuilders.get(0), equalTo(new FieldSortBuilder("username"))); + assertThat(fieldSortBuilders.get(1), equalTo(new FieldSortBuilder("full_name"))); + + final SearchAfterBuilder searchAfterBuilder = queryUserRequest.getSearchAfterBuilder(); + assertThat(searchAfterBuilder, equalTo(new 
SearchAfterBuilder().setSortValues(new String[] { "bart" }))); + + listener.onResponse((Response) new QueryUserResponse(0, List.of())); + } + }; + + final RestQueryUserAction queryUserAction = new RestQueryUserAction(Settings.EMPTY, mockLicenseState); + queryUserAction.handleRequest(restRequest, restChannel, client); + } + assertNotNull(responseSetOnce.get()); + } +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java new file mode 100644 index 0000000000000..460980d318786 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/UserBoolQueryBuilderTests.java @@ -0,0 +1,221 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.DistanceFeatureQueryBuilder; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.MultiTermQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.index.query.SpanQueryBuilder; +import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.query.WildcardQueryBuilder; +import org.elasticsearch.indices.TermsLookup; +import org.elasticsearch.script.Script; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsIterableContaining.hasItem; +import static org.hamcrest.core.StringContains.containsString; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class UserBoolQueryBuilderTests extends ESTestCase { + private static final String[] allowedIndexFieldNames = new String[] { "username", "roles", "full_name", "email", "enabled" }; + + public void testBuildFromSimpleQuery() { + final QueryBuilder query = randomSimpleQuery(); + final UserBoolQueryBuilder userQueryBuilder = UserBoolQueryBuilder.build(query); + assertCommonFilterQueries(userQueryBuilder); + final List mustQueries = userQueryBuilder.must(); + assertThat(mustQueries, hasSize(1)); + assertThat(mustQueries.get(0), equalTo(query)); + assertTrue(userQueryBuilder.should().isEmpty()); + assertTrue(userQueryBuilder.mustNot().isEmpty()); + } + + public void testBuildFromBoolQuery() { + final BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery(); + + if (randomBoolean()) { + boolQueryBuilder.must(QueryBuilders.prefixQuery(randomAllowedField(), "bar")); + } + if (randomBoolean()) { + boolQueryBuilder.should(QueryBuilders.wildcardQuery(randomAllowedField(), "*ar*")); + 
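+            // should clauses are translated clause by clause, the same as must, must_not and filter clauses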
} + if (randomBoolean()) { + boolQueryBuilder.filter(QueryBuilders.termsQuery("roles", randomArray(3, 8, String[]::new, () -> "role-" + randomInt()))); + } + if (randomBoolean()) { + boolQueryBuilder.minimumShouldMatch(randomIntBetween(1, 2)); + } + final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(boolQueryBuilder); + assertCommonFilterQueries(userBoolQueryBuilder); + + assertThat(userBoolQueryBuilder.must(), hasSize(1)); + assertThat(userBoolQueryBuilder.should(), empty()); + assertThat(userBoolQueryBuilder.mustNot(), empty()); + assertThat(userBoolQueryBuilder.filter(), hasItem(QueryBuilders.termQuery("type", "user"))); + assertThat(userBoolQueryBuilder.must().get(0).getClass(), is(BoolQueryBuilder.class)); + final BoolQueryBuilder translated = (BoolQueryBuilder) userBoolQueryBuilder.must().get(0); + assertThat(translated.must(), equalTo(boolQueryBuilder.must())); + assertThat(translated.should(), equalTo(boolQueryBuilder.should())); + assertThat(translated.mustNot(), equalTo(boolQueryBuilder.mustNot())); + assertThat(translated.minimumShouldMatch(), equalTo(boolQueryBuilder.minimumShouldMatch())); + assertThat(translated.filter(), equalTo(boolQueryBuilder.filter())); + } + + public void testFieldNameTranslation() { + String field = randomAllowedField(); + final WildcardQueryBuilder wildcardQueryBuilder = QueryBuilders.wildcardQuery(field, "*" + randomAlphaOfLength(3)); + final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(wildcardQueryBuilder); + assertCommonFilterQueries(userBoolQueryBuilder); + assertThat(userBoolQueryBuilder.must().get(0), equalTo(QueryBuilders.wildcardQuery(field, wildcardQueryBuilder.value()))); + } + + public void testAllowListOfFieldNames() { + final String fieldName = randomValueOtherThanMany( + v -> Arrays.asList(allowedIndexFieldNames).contains(v), + () -> randomFrom(randomAlphaOfLengthBetween(3, 20), "type", "password") + ); + + // MatchAllQueryBuilder doesn't do any translation, so skip + final QueryBuilder q1 = randomValueOtherThanMany( + q -> q.getClass() == MatchAllQueryBuilder.class, + () -> randomSimpleQuery(fieldName) + ); + final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1)); + + assertThat(exception.getMessage(), containsString("Field [" + fieldName + "] is not allowed")); + } + + public void testTermsLookupIsNotAllowed() { + final TermsQueryBuilder q1 = QueryBuilders.termsLookupQuery("roles", new TermsLookup("lookup", "1", "id")); + final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1)); + assertThat(e1.getMessage(), containsString("Terms query with terms lookup is not supported for User query")); + } + + public void testDisallowedQueryTypes() { + final AbstractQueryBuilder> q1 = randomFrom( + QueryBuilders.idsQuery(), + QueryBuilders.rangeQuery(randomAlphaOfLength(5)), + QueryBuilders.matchQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)), + QueryBuilders.constantScoreQuery(mock(QueryBuilder.class)), + QueryBuilders.boostingQuery(mock(QueryBuilder.class), mock(QueryBuilder.class)), + QueryBuilders.queryStringQuery("q=a:42"), + QueryBuilders.simpleQueryStringQuery(randomAlphaOfLength(5)), + QueryBuilders.combinedFieldsQuery(randomAlphaOfLength(5)), + QueryBuilders.disMaxQuery(), + QueryBuilders.distanceFeatureQuery( + randomAlphaOfLength(5), + mock(DistanceFeatureQueryBuilder.Origin.class), + randomAlphaOfLength(5) + ), + 
QueryBuilders.fieldMaskingSpanQuery(mock(SpanQueryBuilder.class), randomAlphaOfLength(5)),
+            QueryBuilders.functionScoreQuery(mock(QueryBuilder.class)),
+            QueryBuilders.fuzzyQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.wrapperQuery(randomAlphaOfLength(5)),
+            QueryBuilders.matchBoolPrefixQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.matchPhraseQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.matchPhrasePrefixQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.moreLikeThisQuery(randomArray(1, 3, String[]::new, () -> randomAlphaOfLength(5))),
+            QueryBuilders.regexpQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.spanTermQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.spanOrQuery(mock(SpanQueryBuilder.class)),
+            QueryBuilders.spanContainingQuery(mock(SpanQueryBuilder.class), mock(SpanQueryBuilder.class)),
+            QueryBuilders.spanFirstQuery(mock(SpanQueryBuilder.class), randomIntBetween(1, 3)),
+            QueryBuilders.spanMultiTermQueryBuilder(mock(MultiTermQueryBuilder.class)),
+            QueryBuilders.spanNotQuery(mock(SpanQueryBuilder.class), mock(SpanQueryBuilder.class)),
+            QueryBuilders.scriptQuery(new Script(randomAlphaOfLength(5))),
+            QueryBuilders.scriptScoreQuery(mock(QueryBuilder.class), new Script(randomAlphaOfLength(5))),
+            QueryBuilders.geoWithinQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.geoBoundingBoxQuery(randomAlphaOfLength(5)),
+            QueryBuilders.geoDisjointQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.geoDistanceQuery(randomAlphaOfLength(5)),
+            QueryBuilders.geoIntersectionQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)),
+            QueryBuilders.geoShapeQuery(randomAlphaOfLength(5), randomAlphaOfLength(5))
+        );
+
+        final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> UserBoolQueryBuilder.build(q1));
+        assertThat(e1.getMessage(), containsString("Query type [" + q1.getName() + "] is not supported for User query"));
+    }
+
+    public void testWillSetAllowedFields() {
+        final UserBoolQueryBuilder userBoolQueryBuilder = UserBoolQueryBuilder.build(randomSimpleQuery());
+
+        final SearchExecutionContext context = mock(SearchExecutionContext.class);
+        doAnswer(invocationOnMock -> {
+            final Object[] args = invocationOnMock.getArguments();
+            @SuppressWarnings("unchecked")
+            final Predicate predicate = (Predicate) args[0];
+            assertTrue(predicate.getClass().getName().startsWith(UserBoolQueryBuilder.class.getName()));
+            testAllowedIndexFieldName(predicate);
+            return null;
+        }).when(context).setAllowedFields(any());
+        try {
+            if (randomBoolean()) {
+                userBoolQueryBuilder.doToQuery(context);
+            } else {
+                userBoolQueryBuilder.doRewrite(context);
+            }
+        } catch (Exception e) {
+            // ignore any exception from the superclass since we only need to verify that the allowed fields are set
+        } finally {
+            verify(context).setAllowedFields(any());
+        }
+    }
+
+    private void testAllowedIndexFieldName(Predicate predicate) {
+        final String allowedField = randomAllowedField();
+        assertTrue(predicate.test(allowedField));
+
+        final String disallowedField = randomBoolean() ?
(randomAlphaOfLengthBetween(1, 3) + allowedField) : (allowedField.substring(1)); + assertFalse(predicate.test(disallowedField)); + } + + private void assertCommonFilterQueries(UserBoolQueryBuilder qb) { + final List tqb = qb.filter() + .stream() + .filter(q -> q.getClass() == TermQueryBuilder.class) + .map(q -> (TermQueryBuilder) q) + .toList(); + assertTrue(tqb.stream().anyMatch(q -> q.equals(QueryBuilders.termQuery("type", "user")))); + } + + private String randomAllowedField() { + return randomFrom(allowedIndexFieldNames); + } + + private QueryBuilder randomSimpleQuery() { + return randomSimpleQuery(randomAllowedField()); + } + + private QueryBuilder randomSimpleQuery(String fieldName) { + return randomFrom( + QueryBuilders.termQuery(fieldName, randomAlphaOfLengthBetween(3, 8)), + QueryBuilders.termsQuery(fieldName, randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))), + QueryBuilders.prefixQuery(fieldName, randomAlphaOfLength(randomIntBetween(3, 10))), + QueryBuilders.wildcardQuery(fieldName, "*" + randomAlphaOfLength(randomIntBetween(3, 10))), + QueryBuilders.matchAllQuery(), + QueryBuilders.existsQuery(fieldName) + ); + } +} From be2f61a81c6c4cb999c7837d1b9d4f870eb982e4 Mon Sep 17 00:00:00 2001 From: Lorenzo Dematté Date: Wed, 17 Jan 2024 14:37:16 +0100 Subject: [PATCH 74/95] Add checks for old cluster features in rolling upgrade tests (#104279) Add the ability to test for the original/old cluster features during a rolling upgrade * Moving ALL_FEATURES to ESRestTestCase (and making it private, since that is its only usage) --- .../rest/yaml/CcsCommonYamlTestSuiteIT.java | 9 +++++ .../yaml/RcsCcsCommonYamlTestSuiteIT.java | 9 +++++ .../upgrades/ClusterFeatureMigrationIT.java | 10 +++--- .../upgrades/DesiredNodesUpgradeIT.java | 10 +++--- .../elasticsearch/upgrades/IndexingIT.java | 5 +-- .../ParameterizedRollingUpgradeTestCase.java | 20 +++++++++++ .../upgrades/SnapshotBasedRecoveryIT.java | 13 +++---- .../org/elasticsearch/upgrades/TsdbIT.java | 12 +++---- .../UpgradeWithOldIndexSettingsIT.java | 18 +++++----- .../test/rest/ESRestTestCase.java | 18 ++++++++-- .../test/rest/ESRestTestFeatureService.java | 14 ++++++++ .../test/rest/RestTestLegacyFeatures.java | 34 +++++++++++++++++-- .../test/rest/TestFeatureService.java | 4 ++- .../ClientYamlTestExecutionContextTests.java | 5 +++ 14 files changed, 139 insertions(+), 42 deletions(-) diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index e709b838a26f3..d91f7cf3e9a8d 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -313,6 +314,14 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( public boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } + + @Override + public Set
getAllSupportedFeatures() { + return Sets.intersection( + testFeatureService.getAllSupportedFeatures(), + searchTestFeatureService.getAllSupportedFeatures() + ); + } }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index a331d6f54cb4a..ce11112bd4416 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.IOUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -298,6 +299,14 @@ protected ClientYamlTestExecutionContext createRestTestExecutionContext( public boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } + + @Override + public Set getAllSupportedFeatures() { + return Sets.intersection( + testFeatureService.getAllSupportedFeatures(), + searchTestFeatureService.getAllSupportedFeatures() + ); + } }; final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java index 2d8ff8b747323..0487b282179a9 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ClusterFeatureMigrationIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.features.FeatureService; -import org.junit.BeforeClass; +import org.junit.Before; import java.io.IOException; import java.util.List; @@ -26,11 +26,11 @@ public class ClusterFeatureMigrationIT extends ParameterizedRollingUpgradeTestCase { - @BeforeClass - public static void checkMigrationVersion() { - assumeTrue( + @Before + public void checkMigrationVersion() { + assumeFalse( "This checks migrations from before cluster features were introduced", - getOldClusterVersion().before(FeatureService.CLUSTER_FEATURES_ADDED_VERSION) + oldClusterHasFeature(FeatureService.FEATURES_SUPPORTED) ); } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 230ab39610b1e..1e9d3d41e6d24 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ 
b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -16,11 +16,11 @@ import org.elasticsearch.client.ResponseException; import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.cluster.metadata.DesiredNodeWithStatus; -import org.elasticsearch.cluster.metadata.MetadataFeatures; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; @@ -48,13 +48,11 @@ private enum ProcessorsPrecision { } public void testUpgradeDesiredNodes() throws Exception { - assumeTrue("Desired nodes was introduced in 8.1", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); + assumeTrue("Desired nodes was introduced in 8.1", oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_NODE_API_SUPPORTED)); - var featureVersions = new MetadataFeatures().getHistoricalFeatures(); - - if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED))) { + if (oldClusterHasFeature(DesiredNode.DOUBLE_PROCESSORS_SUPPORTED)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (getOldClusterVersion().onOrAfter(featureVersions.get(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED))) { + } else if (oldClusterHasFeature(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORTED)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java index d5b5e24e2ccde..273196f392064 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.ListMatcher; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -229,7 +230,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); + assumeTrue("indexing time series indices changed in 8.2.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_NEW_INDEX_FORMAT)); StringBuilder bulk = new StringBuilder(); if (isOldCluster()) { @@ -337,7 +338,7 @@ private void assertTsdbAgg(Matcher... 
expected) throws IOException { } public void testSyntheticSource() throws IOException { - assumeTrue("added in 8.4.0", getOldClusterVersion().onOrAfter(Version.V_8_4_0)); + assumeTrue("added in 8.4.0", oldClusterHasFeature(RestTestLegacyFeatures.SYNTHETIC_SOURCE_SUPPORTED)); if (isOldCluster()) { Request createIndex = new Request("PUT", "/synthetic"); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java index 5a2c4c783ec85..43bc8eacac98c 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; @@ -69,6 +70,7 @@ public static Iterable parameters() { } private static final Set upgradedNodes = new HashSet<>(); + private static final Set oldClusterFeatures = new HashSet<>(); private static boolean upgradeFailed = false; private static IndexVersion oldIndexVersion; @@ -78,6 +80,13 @@ protected ParameterizedRollingUpgradeTestCase(@Name("upgradedNodes") int upgrade this.requestedUpgradedNodes = upgradedNodes; } + @Before + public void extractOldClusterFeatures() { + if (isOldCluster() && oldClusterFeatures.isEmpty()) { + oldClusterFeatures.addAll(testFeatureService.getAllSupportedFeatures()); + } + } + @Before public void extractOldIndexVersion() throws Exception { if (oldIndexVersion == null && upgradedNodes.isEmpty()) { @@ -138,13 +147,24 @@ public void upgradeNode() throws Exception { public static void resetNodes() { oldIndexVersion = null; upgradedNodes.clear(); + oldClusterFeatures.clear(); upgradeFailed = false; } + @Deprecated // Use the new testing framework and oldClusterHasFeature(feature) instead protected static org.elasticsearch.Version getOldClusterVersion() { return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION); } + protected static boolean oldClusterHasFeature(String featureId) { + assert oldClusterFeatures.isEmpty() == false; + return oldClusterFeatures.contains(featureId); + } + + protected static boolean oldClusterHasFeature(NodeFeature feature) { + return oldClusterHasFeature(feature.id()); + } + protected static IndexVersion getOldClusterIndexVersion() { assert oldIndexVersion != null; return oldIndexVersion; diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java index 4b765849e6ea9..ef80643c82c0d 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/SnapshotBasedRecoveryIT.java @@ -13,7 +13,6 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.util.EntityUtils; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import 
org.elasticsearch.cluster.metadata.IndexMetadata; @@ -24,6 +23,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -49,13 +49,10 @@ public SnapshotBasedRecoveryIT(@Name("upgradedNodes") int upgradedNodes) { } public void testSnapshotBasedRecovery() throws Exception { - - assumeFalse( - "Cancel shard allocation command is broken for initial desired balance versions and might allocate shard " - + "on the node where it is not supposed to be. Fixed by https://github.com/elastic/elasticsearch/pull/93635", - getOldClusterVersion() == Version.V_8_6_0 - || getOldClusterVersion() == Version.V_8_6_1 - || getOldClusterVersion() == Version.V_8_7_0 + assumeTrue( + "Cancel shard allocation command is broken for initial versions of the desired_balance allocator", + oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_SUPPORTED) == false + || oldClusterHasFeature(RestTestLegacyFeatures.DESIRED_BALANCED_ALLOCATOR_FIXED) ); final String indexName = "snapshot_based_recovery"; diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index b42646164b335..3ce0fc79087c2 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -10,11 +10,11 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.FormatNames; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.time.Instant; @@ -130,10 +130,7 @@ public TsdbIT(@Name("upgradedNodes") int upgradedNodes) { """; public void testTsdbDataStream() throws Exception { - assumeTrue( - "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0", - getOldClusterVersion().onOrAfter(Version.V_8_7_0) - ); + assumeTrue("TSDB was GA-ed in 8.7.0", oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE)); String dataStreamName = "k8s"; if (isOldCluster()) { final String INDEX_TEMPLATE = """ @@ -159,8 +156,9 @@ public void testTsdbDataStream() throws Exception { public void testTsdbDataStreamWithComponentTemplate() throws Exception { assumeTrue( - "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", - getOldClusterVersion().onOrAfter(Version.V_8_7_0) && getOldClusterVersion().before(Version.V_8_11_0) + "TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", + oldClusterHasFeature(RestTestLegacyFeatures.TSDB_GENERALLY_AVAILABLE) + && (oldClusterHasFeature(RestTestLegacyFeatures.TSDB_EMPTY_TEMPLATE_FIXED) == false) ); String dataStreamName = "template-with-component-template"; if (isOldCluster()) { diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java index 9647bfb739164..3af344051030b 100644 --- 
a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java @@ -10,13 +10,13 @@ import com.carrotsearch.randomizedtesting.annotations.Name; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Strings; +import org.elasticsearch.test.rest.RestTestLegacyFeatures; import java.io.IOException; import java.util.Map; @@ -42,10 +42,7 @@ public void testOldIndexSettings() throws Exception { Request createTestIndex = new Request("PUT", "/" + INDEX_NAME); createTestIndex.setJsonEntity("{\"settings\": {\"index.indexing.slowlog.level\": \"WARN\"}}"); createTestIndex.setOptions(expectWarnings(EXPECTED_WARNING)); - if (getOldClusterVersion().before(Version.V_8_0_0)) { - // create index with settings no longer valid in 8.0 - client().performRequest(createTestIndex); - } else { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED)) { assertTrue( expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage() .contains("unknown setting [index.indexing.slowlog.level]") @@ -53,12 +50,15 @@ public void testOldIndexSettings() throws Exception { Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME); client().performRequest(createTestIndex1); + } else { + // create index with settings no longer valid in 8.0 + client().performRequest(createTestIndex); } // add some data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } bulk.setJsonEntity(Strings.format(""" @@ -70,7 +70,7 @@ public void testOldIndexSettings() throws Exception { // add some more data Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { bulk.setOptions(expectWarnings(EXPECTED_WARNING)); } bulk.setJsonEntity(Strings.format(""" @@ -79,7 +79,7 @@ public void testOldIndexSettings() throws Exception { """, INDEX_NAME)); client().performRequest(bulk); } else { - if (getOldClusterVersion().before(Version.V_8_0_0)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED) == false) { Request createTestIndex = new Request("PUT", "/" + INDEX_NAME + "/_settings"); // update index settings should work createTestIndex.setJsonEntity("{\"index.indexing.slowlog.level\": \"INFO\"}"); @@ -117,7 +117,7 @@ private void assertCount(String index, int countAtLeast) throws IOException { public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { Request request = new Request("PUT", "/" + index + "/_settings"); request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); - if (getOldClusterVersion().before(Version.V_7_17_9)) { + if (oldClusterHasFeature(RestTestLegacyFeatures.DEPRECATION_WARNINGS_LEAK_FIXED) == false) { // There 
is a bug (fixed in 7.17.9 and 8.7.0 where deprecation warnings could leak into ClusterApplierService#applyChanges) // Below warnings are set (and leaking) from an index in this test case request.setOptions(expectVersionSpecificWarnings(v -> { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index a1af258784903..f9996bfc91204 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -131,7 +131,6 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.test.rest.TestFeatureService.ALL_FEATURES; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; @@ -232,7 +231,22 @@ public enum ProductFeature { private static EnumSet availableFeatures; private static Set nodesVersions; - private static TestFeatureService testFeatureService = ALL_FEATURES; + + private static final TestFeatureService ALL_FEATURES = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return true; + } + + @Override + public Set getAllSupportedFeatures() { + throw new UnsupportedOperationException( + "Only available to properly initialized TestFeatureService. See ESRestTestCase#createTestFeatureService" + ); + } + }; + + protected static TestFeatureService testFeatureService = ALL_FEATURES; protected static Set getCachedNodesVersions() { assert nodesVersions != null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java index a73c43f4fc46a..c8647f4e9c43b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -9,6 +9,7 @@ package org.elasticsearch.test.rest; import org.elasticsearch.Version; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.features.FeatureData; import org.elasticsearch.features.FeatureSpecification; @@ -22,6 +23,7 @@ class ESRestTestFeatureService implements TestFeatureService { private final Predicate historicalFeaturesPredicate; private final Set clusterStateFeatures; + private final Set allSupportedFeatures; ESRestTestFeatureService( List specs, @@ -31,6 +33,12 @@ class ESRestTestFeatureService implements TestFeatureService { var minNodeVersion = nodeVersions.stream().min(Comparator.naturalOrder()); var featureData = FeatureData.createFromSpecifications(specs); var historicalFeatures = featureData.getHistoricalFeatures(); + Set allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); + + this.allSupportedFeatures = Sets.union(clusterStateFeatures, minNodeVersion.<Set<String>>map(v -> { + var historicalFeaturesForVersion = historicalFeatures.floorEntry(v); + return historicalFeaturesForVersion == null ?
Set.of() : historicalFeaturesForVersion.getValue(); + }).orElse(allHistoricalFeatures)); this.historicalFeaturesPredicate = minNodeVersion.<Predicate<String>>map( v -> featureId -> hasHistoricalFeature(historicalFeatures, v, featureId) @@ -43,10 +51,16 @@ private static boolean hasHistoricalFeature(NavigableMap<Version, Set<String>> h return features != null && features.getValue().contains(featureId); } + @Override public boolean clusterHasFeature(String featureId) { if (clusterStateFeatures.contains(featureId)) { return true; } return historicalFeaturesPredicate.test(featureId); } + + @Override + public Set getAllSupportedFeatures() { + return allSupportedFeatures; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index fcd2f781ec58d..ca7684e60d281 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -57,10 +57,10 @@ public class RestTestLegacyFeatures implements FeatureSpecification { public static final NodeFeature ML_MEMORY_OVERHEAD_FIXED = new NodeFeature("ml.memory_overhead_fixed"); // QA - rolling upgrade tests + public static final NodeFeature DESIRED_NODE_API_SUPPORTED = new NodeFeature("desired_node_supported"); public static final NodeFeature SECURITY_UPDATE_API_KEY = new NodeFeature("security.api_key_update"); public static final NodeFeature SECURITY_BULK_UPDATE_API_KEY = new NodeFeature("security.api_key_bulk_update"); @UpdateForV9 - public static final NodeFeature WATCHES_VERSION_IN_META = new NodeFeature("watcher.version_in_meta"); @UpdateForV9 public static final NodeFeature SECURITY_ROLE_DESCRIPTORS_OPTIONAL = new NodeFeature("security.role_descriptors_optional"); @@ -76,6 +76,27 @@ public class RestTestLegacyFeatures implements FeatureSpecification { @UpdateForV9 public static final NodeFeature ML_ANALYTICS_MAPPINGS = new NodeFeature("ml.analytics_mappings"); + public static final NodeFeature TSDB_NEW_INDEX_FORMAT = new NodeFeature("indices.tsdb_new_format"); + public static final NodeFeature TSDB_GENERALLY_AVAILABLE = new NodeFeature("indices.tsdb_supported"); + + /* + * A composable index template with no template defined in the body is mistakenly always assumed to not be a time series template. + * Fixed in #98840 + */ + public static final NodeFeature TSDB_EMPTY_TEMPLATE_FIXED = new NodeFeature("indices.tsdb_empty_composable_template_fixed"); + public static final NodeFeature SYNTHETIC_SOURCE_SUPPORTED = new NodeFeature("indices.synthetic_source"); + + public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_SUPPORTED = new NodeFeature("allocator.desired_balance"); + + /* + * Cancel shard allocation command is broken for initial desired balance versions + * and might allocate shard on the node where it is not supposed to be. This
+ */ + public static final NodeFeature DESIRED_BALANCED_ALLOCATOR_FIXED = new NodeFeature("allocator.desired_balance_fixed"); + public static final NodeFeature INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED = new NodeFeature("settings.indexing_slowlog_level_removed"); + public static final NodeFeature DEPRECATION_WARNINGS_LEAK_FIXED = new NodeFeature("deprecation_warnings_leak_fixed"); + // YAML public static final NodeFeature REST_ELASTIC_PRODUCT_HEADER_PRESENT = new NodeFeature("action.rest.product_header_present"); @@ -103,7 +124,16 @@ public Map getHistoricalFeatures() { entry(TRANSFORM_NEW_API_ENDPOINT, Version.V_7_5_0), entry(ML_INDICES_HIDDEN, Version.V_7_7_0), entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0), - entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1) + entry(REST_ELASTIC_PRODUCT_HEADER_PRESENT, Version.V_8_0_1), + entry(DESIRED_NODE_API_SUPPORTED, Version.V_8_1_0), + entry(TSDB_NEW_INDEX_FORMAT, Version.V_8_2_0), + entry(SYNTHETIC_SOURCE_SUPPORTED, Version.V_8_4_0), + entry(DESIRED_BALANCED_ALLOCATOR_SUPPORTED, Version.V_8_6_0), + entry(DESIRED_BALANCED_ALLOCATOR_FIXED, Version.V_8_7_1), + entry(TSDB_GENERALLY_AVAILABLE, Version.V_8_7_0), + entry(TSDB_EMPTY_TEMPLATE_FIXED, Version.V_8_11_0), + entry(INDEXING_SLOWLOG_LEVEL_SETTING_REMOVED, Version.V_8_0_0), + entry(DEPRECATION_WARNINGS_LEAK_FIXED, Version.V_7_17_9) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java index 9de1fcf631520..332a00ce895a0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -8,8 +8,10 @@ package org.elasticsearch.test.rest; +import java.util.Set; + public interface TestFeatureService { boolean clusterHasFeature(String featureId); - TestFeatureService ALL_FEATURES = ignored -> true; + Set getAllSupportedFeatures(); } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 6e8397c816b3b..94b80fcc3fab3 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -29,6 +29,11 @@ private static class MockTestFeatureService implements TestFeatureService { public boolean clusterHasFeature(String featureId) { return true; } + + @Override + public Set getAllSupportedFeatures() { + return Set.of(); + } } public void testHeadersSupportStashedValueReplacement() throws IOException { From 585face0cf2c5de78dada30018df9fc01c920cb8 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 17 Jan 2024 14:55:37 +0100 Subject: [PATCH 75/95] ESQL: Support loading shapes from source into WKB blocks (#104269) This commit adds support for reading geo_shape and shape fields into WKB blocks so they can be used to project the data into the result of an ES|QL query.
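For context, well-known binary (WKB) encodes a geometry as a byte-order marker, a geometry-type code, and the raw coordinates, so each value in the new shape blocks is a small, self-describing byte payload. The sketch below is illustrative only and is not part of this patch; the class and method names are hypothetical, and the real encoding is handled by Elasticsearch's own geometry utilities.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical, minimal illustration of a WKB payload like those stored per
// shape value. Not the Elasticsearch implementation; shown only to explain
// the byte layout the blocks carry.
public class WkbPointExample {

    // Encode a 2D point as WKB: byte-order marker, type code, then coordinates.
    static byte[] encodePoint(double x, double y) {
        ByteBuffer buf = ByteBuffer.allocate(21).order(ByteOrder.LITTLE_ENDIAN);
        buf.put((byte) 1);   // 1 = little-endian (NDR) byte order
        buf.putInt(1);       // geometry type 1 = Point
        buf.putDouble(x);    // coordinates follow as IEEE-754 doubles
        buf.putDouble(y);
        return buf.array();
    }

    public static void main(String[] args) {
        byte[] wkb = encodePoint(1.0, 2.0); // same geometry as WKT "POINT (1 2)"
        System.out.println(wkb.length);     // 21 bytes = 1 + 4 + 8 + 8
    }
}

A polygon or multi-polygon payload follows the same pattern with a different type code and a list of coordinates, which is why a single BytesRef per value is enough to carry any shape through the query pipeline.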
--- docs/changelog/104269.yaml | 5 + .../esql/functions/types/mv_count.asciidoc | 2 + .../esql/functions/types/mv_first.asciidoc | 2 + .../esql/functions/types/mv_last.asciidoc | 2 + .../esql/functions/types/to_string.asciidoc | 2 + .../mapper/AbstractGeometryFieldMapper.java | 12 +- .../AbstractPointGeometryFieldMapper.java | 17 +- .../AbstractShapeGeometryFieldMapper.java | 10 +- .../elasticsearch/xpack/esql/CsvAssert.java | 4 + .../xpack/esql/CsvTestUtils.java | 10 +- .../xpack/esql/CsvTestsDataLoader.java | 53 ++-- .../src/main/resources/countries_bbox.csv | 249 ++++++++++++++++++ .../src/main/resources/countries_bbox_web.csv | 249 ++++++++++++++++++ .../resources/mapping-countries_bbox.json | 13 + .../resources/mapping-countries_bbox_web.json | 13 + .../src/main/resources/show.csv-spec | 32 ++- .../main/resources/spatial_shapes.csv-spec | 135 ++++++++++ .../ToCartesianShapeFromStringEvaluator.java | 125 +++++++++ .../ToGeoShapeFromStringEvaluator.java | 125 +++++++++ .../ToStringFromCartesianShapeEvaluator.java | 110 ++++++++ .../ToStringFromGeoShapeEvaluator.java | 110 ++++++++ .../xpack/esql/action/ColumnInfo.java | 4 +- .../xpack/esql/action/ResponseValueUtils.java | 8 +- .../xpack/esql/analysis/Verifier.java | 2 + .../operator/comparison/ComparisonMapper.java | 5 +- .../function/EsqlFunctionRegistry.java | 4 + .../scalar/convert/ToCartesianShape.java | 64 +++++ .../function/scalar/convert/ToGeoShape.java | 64 +++++ .../function/scalar/convert/ToString.java | 18 +- .../function/scalar/multivalue/MvCount.java | 2 + .../function/scalar/multivalue/MvFirst.java | 4 + .../function/scalar/multivalue/MvLast.java | 4 + .../xpack/esql/io/stream/PlanNamedTypes.java | 6 + .../esql/planner/LocalExecutionPlanner.java | 2 +- .../xpack/esql/planner/PlannerUtils.java | 11 +- .../xpack/esql/type/EsqlDataTypes.java | 9 +- .../esql/action/EsqlQueryResponseTests.java | 6 + .../xpack/esql/analysis/AnalyzerTests.java | 44 ++-- .../function/AbstractFunctionTestCase.java | 6 +- .../expression/function/TestCaseSupplier.java | 46 ++++ .../scalar/convert/ToCartesianShapeTests.java | 79 ++++++ .../scalar/convert/ToGeoShapeTests.java | 79 ++++++ .../scalar/convert/ToStringTests.java | 14 + .../AbstractMultivalueFunctionTestCase.java | 64 ++++- .../scalar/multivalue/MvCountTests.java | 2 + .../scalar/multivalue/MvFirstTests.java | 2 + .../scalar/multivalue/MvLastTests.java | 8 + .../AbstractBinaryComparisonTestCase.java | 4 +- .../test/esql/40_unsupported_types.yml | 24 +- 49 files changed, 1736 insertions(+), 129 deletions(-) create mode 100644 docs/changelog/104269.yaml create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json create mode 100644 x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java create mode 100644 
x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java create mode 100644 x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java create mode 100644 x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java diff --git a/docs/changelog/104269.yaml b/docs/changelog/104269.yaml new file mode 100644 index 0000000000000..8d4b0fc5d5198 --- /dev/null +++ b/docs/changelog/104269.yaml @@ -0,0 +1,5 @@ +pr: 104269 +summary: "ESQL: Support loading shapes from source into WKB blocks" +area: "ES|QL" +type: enhancement +issues: [] diff --git a/docs/reference/esql/functions/types/mv_count.asciidoc b/docs/reference/esql/functions/types/mv_count.asciidoc index 440e66d11096e..a2e7119bab05d 100644 --- a/docs/reference/esql/functions/types/mv_count.asciidoc +++ b/docs/reference/esql/functions/types/mv_count.asciidoc @@ -3,9 +3,11 @@ v | result boolean | integer cartesian_point | integer +cartesian_shape | integer datetime | integer double | integer geo_point | integer +geo_shape | integer integer | integer ip | integer keyword | integer diff --git a/docs/reference/esql/functions/types/mv_first.asciidoc b/docs/reference/esql/functions/types/mv_first.asciidoc index e6c67a454b96b..620c7cf13b771 100644 --- a/docs/reference/esql/functions/types/mv_first.asciidoc +++ b/docs/reference/esql/functions/types/mv_first.asciidoc @@ -3,9 +3,11 @@ v | result boolean | boolean cartesian_point | cartesian_point +cartesian_shape | cartesian_shape datetime | datetime double | double geo_point | geo_point +geo_shape | geo_shape integer | integer ip | ip keyword | keyword diff --git a/docs/reference/esql/functions/types/mv_last.asciidoc b/docs/reference/esql/functions/types/mv_last.asciidoc index e6c67a454b96b..620c7cf13b771 100644 --- a/docs/reference/esql/functions/types/mv_last.asciidoc +++ b/docs/reference/esql/functions/types/mv_last.asciidoc @@ -3,9 +3,11 @@ v | result boolean | boolean cartesian_point | cartesian_point +cartesian_shape | cartesian_shape datetime | datetime double | double geo_point | geo_point +geo_shape | geo_shape integer | integer ip | ip keyword | keyword diff --git a/docs/reference/esql/functions/types/to_string.asciidoc b/docs/reference/esql/functions/types/to_string.asciidoc index 4de4af735b07f..773e396f41373 100644 --- a/docs/reference/esql/functions/types/to_string.asciidoc +++ b/docs/reference/esql/functions/types/to_string.asciidoc @@ -3,9 +3,11 @@ v | result boolean | keyword cartesian_point | keyword +cartesian_shape | keyword datetime | keyword double | keyword geo_point | keyword +geo_shape | keyword integer | keyword ip | keyword keyword | keyword diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 187d59a88e2fd..d5098e1021a1c 100644 --- 
a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -128,7 +128,7 @@ protected Object parseSourceValue(Object value) { }; } - public ValueFetcher valueFetcher(Set sourcePaths, Object nullValue, String format) { + public ValueFetcher valueFetcher(Set sourcePaths, T nullValue, String format) { Function<List<T>, List<Object>> formatter = getFormatter(format != null ? format : GeometryFormatterFactory.GEOJSON); return new ArraySourceValueFetcher(sourcePaths, nullValueAsSource(nullValue)) { @Override @@ -140,7 +140,15 @@ protected Object parseSourceValue(Object value) { }; } - protected abstract Object nullValueAsSource(Object nullValue); + @Override + public BlockLoader blockLoader(BlockLoaderContext blContext) { + // Currently we can only load from source in ESQL + ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); + // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); + } + + protected abstract Object nullValueAsSource(T nullValue); } private final Explicit ignoreMalformed; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java index 031b67c263505..be6e00d5c7b45 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.TriFunction; -import org.elasticsearch.common.geo.GeometryFormatterFactory; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -174,20 +173,8 @@ protected AbstractPointFieldType( } @Override - protected Object nullValueAsSource(Object nullValue) { - if (nullValue == null) { - return null; - } - SpatialPoint point = (SpatialPoint) nullValue; - return point.toWKT(); - } - - @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - // Currently we can only load from source in ESQL - ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); - // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) - return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); + protected Object nullValueAsSource(T nullValue) { + return nullValue == null ?
null : nullValue.toWKT(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java index c18c4db955a43..56f1faeb38a5b 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractShapeGeometryFieldMapper.java @@ -64,14 +64,8 @@ public Orientation orientation() { } @Override - public BlockLoader blockLoader(BlockLoaderContext blContext) { - // TODO: Support shapes in ESQL - return null; - } - - @Override - protected Object nullValueAsSource(Object nullValue) { - // TODO: When we support shapes in ESQL; we need to return a shape in source format here + protected Object nullValueAsSource(T nullValue) { + // we don't support null values for shapes return nullValue; } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java index 3968c2f33fca8..8886951030c07 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java @@ -205,6 +205,10 @@ public static void assertData( expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> GEO.wkbToWkt((BytesRef) x)); } else if (expectedType == Type.CARTESIAN_POINT) { expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> CARTESIAN.wkbToWkt((BytesRef) x)); + } else if (expectedType == Type.GEO_SHAPE) { + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> GEO.wkbToWkt((BytesRef) x)); + } else if (expectedType == Type.CARTESIAN_SHAPE) { + expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> CARTESIAN.wkbToWkt((BytesRef) x)); + } else if (expectedType == Type.IP) { // convert BytesRef-packed IP to String, allowing subsequent comparison with what's expected expectedValue = rebuildExpected(expectedValue, BytesRef.class, x -> DocValueFormat.IP.format((BytesRef) x)); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index 08d837a9d802d..4e0f0b8661631 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -118,7 +118,9 @@ public static Tuple<Page, List<String>> loadPageFromCsv(URL source) throws Excep
null : CARTESIAN.wktToWkb(x), BytesRef.class); + CARTESIAN_POINT(x -> x == null ? null : CARTESIAN.wktToWkb(x), BytesRef.class), + GEO_SHAPE(x -> x == null ? null : GEO.wktToWkb(x), BytesRef.class), + CARTESIAN_SHAPE(x -> x == null ? null : CARTESIAN.wktToWkb(x), BytesRef.class); private static final Map LOOKUP = new HashMap<>(); @@ -486,7 +490,7 @@ public static Type asType(ElementType elementType, Type actualType) { } private static Type bytesRefBlockType(Type actualType) { - if (actualType == GEO_POINT || actualType == CARTESIAN_POINT) { + if (actualType == GEO_POINT || actualType == CARTESIAN_POINT || actualType == GEO_SHAPE || actualType == CARTESIAN_SHAPE) { return actualType; } else { return KEYWORD; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java index 8641c2511b199..1e26a3df45419 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestsDataLoader.java @@ -57,26 +57,29 @@ public class CsvTestsDataLoader { private static final TestsDataset CLIENT_IPS = new TestsDataset("clientips", "mapping-clientips.json", "clientips.csv"); private static final TestsDataset AIRPORTS = new TestsDataset("airports", "mapping-airports.json", "airports.csv"); private static final TestsDataset AIRPORTS_WEB = new TestsDataset("airports_web", "mapping-airports_web.json", "airports_web.csv"); + private static final TestsDataset COUNTRIES_BBOX = new TestsDataset( + "countries_bbox", + "mapping-countries_bbox.json", + "countries_bbox.csv" + ); + private static final TestsDataset COUNTRIES_BBOX_WEB = new TestsDataset( + "countries_bbox_web", + "mapping-countries_bbox_web.json", + "countries_bbox_web.csv" + ); - public static final Map CSV_DATASET_MAP = Map.of( - EMPLOYEES.indexName, - EMPLOYEES, - HOSTS.indexName, - HOSTS, - APPS.indexName, - APPS, - LANGUAGES.indexName, - LANGUAGES, - UL_LOGS.indexName, - UL_LOGS, - SAMPLE_DATA.indexName, - SAMPLE_DATA, - CLIENT_IPS.indexName, - CLIENT_IPS, - AIRPORTS.indexName, - AIRPORTS, - AIRPORTS_WEB.indexName, - AIRPORTS_WEB + public static final Map CSV_DATASET_MAP = Map.ofEntries( + Map.entry(EMPLOYEES.indexName, EMPLOYEES), + Map.entry(HOSTS.indexName, HOSTS), + Map.entry(APPS.indexName, APPS), + Map.entry(LANGUAGES.indexName, LANGUAGES), + Map.entry(UL_LOGS.indexName, UL_LOGS), + Map.entry(SAMPLE_DATA.indexName, SAMPLE_DATA), + Map.entry(CLIENT_IPS.indexName, CLIENT_IPS), + Map.entry(AIRPORTS.indexName, AIRPORTS), + Map.entry(AIRPORTS_WEB.indexName, AIRPORTS_WEB), + Map.entry(COUNTRIES_BBOX.indexName, COUNTRIES_BBOX), + Map.entry(COUNTRIES_BBOX_WEB.indexName, COUNTRIES_BBOX_WEB) ); private static final EnrichConfig LANGUAGES_ENRICH = new EnrichConfig("languages_policy", "enrich-policy-languages.json"); @@ -317,16 +320,22 @@ private static void loadCsvData( if (multiValues.length > 0) {// multi-value StringBuilder rowStringValue = new StringBuilder("["); for (String s : multiValues) { - rowStringValue.append("\"" + s + "\","); + if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { + rowStringValue.append("\"" + s + "\","); + } else { + rowStringValue.append(s + ","); + } } // remove the last comma and put a closing bracket instead rowStringValue.replace(rowStringValue.length() - 1, rowStringValue.length(), "]"); entries[i] 
= rowStringValue.toString(); } else { - entries[i] = "\"" + entries[i] + "\""; + if (entries[i].startsWith("\"") == false || entries[i].endsWith("\"") == false) { + entries[i] = "\"" + entries[i] + "\""; + } } // replace any escaped commas with single comma - entries[i].replace(ESCAPED_COMMA_SEQUENCE, ","); + entries[i] = entries[i].replace(ESCAPED_COMMA_SEQUENCE, ","); row.append("\"" + columns[i] + "\":" + entries[i]); } catch (Exception e) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv new file mode 100644 index 0000000000000..f8701f386e73b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox.csv @@ -0,0 +1,249 @@ +id:keyword,name:keyword,shape:geo_shape +FLK,Falkland Is.,"BBOX(-61.148055\, -57.733200\, -51.249455\, -52.343055)" +GUF,French Guiana,"BBOX(-54.603782\, -51.648055\, 5.755418\, 2.113473)" +GUY,Guyana,"BBOX(-61.389727\, -56.470636\, 8.535273\, 1.186873)" +PCN,Pitcairn Is.,"BBOX(-130.105055\, -128.286118\, -24.325836\, -25.082227)" +SGS,South Georgia & the South Sandwich Is.,"BBOX(-38.023755\, -26.241391\, -53.989727\, -58.498609)" +SHN,St. Helena,"BBOX(-5.792782\, -5.645282\, -15.903755\, -16.021946)" +SUR,Suriname,"BBOX(-58.071400\, -53.986118\, 6.001809\, 1.836245)" +TTO,Trinidad & Tobago,"BBOX(-61.921600\, -60.520836\, 11.345554\, 10.040345)" +VEN,Venezuela,"BBOX(-73.378064\, -59.803055\, 12.197500\, 0.649164)" +ASM,American Samoa,"BBOX(-170.823227\, -170.561873\, -14.254309\, -14.375555)" +COK,Cook Is.,"BBOX(-165.848345\, -157.703764\, -10.881318\, -21.940836)" +PYF,French Polynesia,"BBOX(-151.497773\, -138.809755\, -8.778191\, -17.870836)" +UMI,Jarvis I.,"BBOX(-160.045164\, -160.009464\, -0.374309\, -0.398055)" +NIU,Niue,"BBOX(-169.952236\, -169.781555\, -18.963336\, -19.145555)" +WSM,Samoa,"BBOX(-172.780027\, -171.429200\, -13.460555\, -14.057500)" +TKL,Tokelau,"BBOX(-171.862718\, -171.843764\, -9.170627\, -9.218891)" +TON,Tonga,"BBOX(-175.360000\, -173.906827\, -18.568055\, -21.268064)" +WLF,Wallis & Futuna,"BBOX(-178.190273\, -176.121936\, -13.214864\, -14.323891)" +ARG,Argentina,"BBOX(-73.582300\, -53.650009\, -21.780518\, -55.051673)" +BOL,Bolivia,"BBOX(-69.656191\, -57.521118\, -9.679191\, -22.901109)" +BRA,Brazil,"BBOX(-74.004591\, -34.792918\, 5.272709\, -33.741118)" +CHL,Chile,"BBOX(-109.446109\, -66.420627\, -17.505282\, -55.902227)" +ECU,Ecuador,"BBOX(-91.663891\, -75.216846\, 1.437782\, -5.000309)" +PRY,Paraguay,"BBOX(-62.643773\, -54.243900\, -19.296809\, -27.584727)" +PER,Peru,"BBOX(-81.355146\, -68.673909\, -0.036873\, -18.348546)" +URY,Uruguay,"BBOX(-58.438609\, -53.098300\, -30.096673\, -34.943818)" +UMI,Baker I.,"BBOX(-176.467655\, -176.455855\, 0.222573\, 0.215282)" +CAN,Canada,"BBOX(-141.002991\, -52.617364\, 83.113873\, 41.675554)" +GTM,Guatemala,"BBOX(-92.246782\, -88.214736\, 17.821109\, 13.745836)" +UMI,Howland I.,"BBOX(-176.643082\, -176.631091\, 0.808609\, 0.790282)" +UMI,Johnston Atoll,"BBOX(-169.538936\, -169.523927\, 16.730273\, 16.724164)" +MEX,Mexico,"BBOX(-118.404164\, -86.738618\, 32.718454\, 14.550545)" +UMI,Midway Is.,"BBOX(-177.395845\, -177.360545\, 28.221518\, 28.184154)" +BRB,Barbados,"BBOX(-59.659446\, -59.427082\, 13.337082\, 13.050554)" +DMA,Dominica,"BBOX(-61.491391\, -61.250700\, 15.631945\, 15.198054)" +GRD,Grenada,"BBOX(-61.785182\, -61.596391\, 12.237154\, 11.996945)" +GLP,Guadeloupe,"BBOX(-61.796109\, -61.187082\, 16.512918\, 15.870000)" 
+MTQ,Martinique,"BBOX(-61.231536\, -60.816946\, 14.880136\, 14.402773)" +LCA,St. Lucia,"BBOX(-61.079582\, -60.878064\, 14.109309\, 13.709445)" +SPM,St. Pierre & Miquelon,"BBOX(-56.397782\, -56.145500\, 47.135827\, 46.747191)" +VCT,St. Vincent & the Grenadines,"BBOX(-61.280146\, -61.120282\, 13.383191\, 13.130282)" +ABW,Aruba,"BBOX(-70.059664\, -69.874864\, 12.627773\, 12.411109)" +BMU,Bermuda,"BBOX(-64.823064\, -64.676809\, 32.379509\, 32.260554)" +DOM,Dominican Republic,"BBOX(-72.003064\, -68.322927\, 19.930827\, 17.604164)" +HTI,Haiti,"BBOX(-74.467791\, -71.629182\, 20.091454\, 18.022782)" +JAM,Jamaica,"BBOX(-78.373900\, -76.221118\, 18.522500\, 17.697218)" +ANT,Netherlands Antilles,"BBOX(-69.163618\, -68.192927\, 12.383891\, 12.020554)" +BHS,The Bahamas,"BBOX(-78.978900\, -72.738891\, 26.929164\, 20.915273)" +TCA,Turks & Caicos Is.,"BBOX(-72.031464\, -71.127573\, 21.957773\, 21.429918)" +BLZ,Belize,"BBOX(-89.216400\, -87.779591\, 18.489900\, 15.889854)" +CYM,Cayman Is.,"BBOX(-81.400836\, -81.093064\, 19.354164\, 19.265000)" +COL,Colombia,"BBOX(-81.720146\, -66.870455\, 12.590273\, -4.236873)" +CRI,Costa Rica,"BBOX(-85.911391\, -82.561400\, 11.212845\, 8.025673)" +CUB,Cuba,"BBOX(-84.952927\, -74.131255\, 23.194027\, 19.821945)" +SLV,El Salvador,"BBOX(-90.108064\, -87.694673\, 14.431982\, 13.156391)" +HND,Honduras,"BBOX(-89.350491\, -83.131855\, 16.435827\, 12.985173)" +NIC,Nicaragua,"BBOX(-87.689827\, -83.131855\, 15.022218\, 10.709691)" +PAN,Panama,"BBOX(-83.030291\, -77.198336\, 9.620136\, 7.206109)" +AIA,Anguilla,"BBOX(-63.167782\, -62.972709\, 18.272982\, 18.164445)" +ATG,Antigua & Barbuda,"BBOX(-61.891109\, -61.666946\, 17.724300\, 16.989718)" +VGB,British Virgin Is.,"BBOX(-64.698482\, -64.324527\, 18.504854\, 18.383891)" +MSR,Montserrat,"BBOX(-62.236946\, -62.138891\, 16.812354\, 16.671391)" +PRI,Puerto Rico,"BBOX(-67.266400\, -65.301118\, 18.519445\, 17.922218)" +KNA,St. 
Kitts & Nevis,"BBOX(-62.862782\, -62.622509\, 17.410136\, 17.208882)" +VIR,Virgin Is.,"BBOX(-65.023509\, -64.562573\, 18.387673\, 17.676664)" +FRO,Faroe Is.,"BBOX(-7.433473\, -6.389718\, 62.357500\, 61.388327)" +GRL,Greenland,"BBOX(-73.053609\, -12.157637\, 83.623600\, 59.790273)" +XGK,Guernsey,"BBOX(-2.668609\, -2.500973\, 49.508191\, 49.422491)" +ISL,Iceland,"BBOX(-24.538400\, -13.499446\, 66.536100\, 63.390000)" +IRL,Ireland,"BBOX(-10.474727\, -6.013055\, 55.379991\, 51.445545)" +XIM,Isle of Man,"BBOX(-4.787155\, -4.308682\, 54.416382\, 54.055545)" +SJM,Jan Mayen,"BBOX(-9.119909\, -7.928509\, 71.180818\, 70.803863)" +XJE,Jersey,"BBOX(-2.247364\, -2.015000\, 49.261109\, 49.167773)" +GBR,United Kingdom,"BBOX(-8.171664\, 1.749445\, 60.843327\, 49.955273)" +CPV,Cape Verde,"BBOX(-25.360555\, -22.666109\, 17.192364\, 14.811109)" +CIV,Cote d'Ivoire,"BBOX(-8.606382\, -2.487782\, 10.735254\, 4.344718)" +GHA,Ghana,"BBOX(-3.248891\, 1.202782\, 11.155691\, 4.727082)" +GIB,Gibraltar,"BBOX(-5.356173\, -5.334509\, 36.163309\, 36.112073)" +LBR,Liberia,"BBOX(-11.492327\, -7.368400\, 8.512782\, 4.343609)" +MAR,Morocco,"BBOX(-13.174964\, -1.011809\, 35.919164\, 27.664236)" +PRT,Portugal,"BBOX(-31.289027\, -6.190455\, 42.150673\, 32.637500)" +ESP,Spain,"BBOX(-18.169864\, 4.316945\, 43.764300\, 27.637500)" +ESH,Western Sahara,"BBOX(-17.101527\, -8.666391\, 27.666954\, 20.764100)" +BFA,Burkina Faso,"BBOX(-5.520837\, 2.397927\, 15.082773\, 9.395691)" +GIN,Guinea,"BBOX(-15.080837\, -7.653373\, 12.677500\, 7.193927)" +GNB,Guinea-Bissau,"BBOX(-16.717773\, -13.643891\, 12.684718\, 10.925100)" +MLI,Mali,"BBOX(-12.244837\, 4.251391\, 25.000273\, 10.142154)" +MRT,Mauritania,"BBOX(-17.075555\, -4.806109\, 27.290454\, 14.725636)" +SEN,Senegal,"BBOX(-17.532782\, -11.369927\, 16.690618\, 12.301745)" +SLE,Sierra Leone,"BBOX(-13.295609\, -10.264309\, 9.997500\, 6.923609)" +GMB,The Gambia,"BBOX(-16.821664\, -13.798609\, 13.826391\, 13.059973)" +DJI,Djibouti,"BBOX(41.759854\, 43.420409\, 12.708327\, 10.942218)" +ERI,Eritrea,"BBOX(36.443282\, 43.121382\, 17.994882\, 12.363891)" +ETH,Ethiopia,"BBOX(32.991800\, 47.988245\, 14.883609\, 3.406664)" +MNG,Mongolia,"BBOX(87.761100\, 119.931509\, 52.142773\, 41.586654)" +SDN,Sudan,"BBOX(21.829100\, 38.607500\, 22.232218\, 3.493391)" +UGA,Uganda,"BBOX(29.574300\, 35.009718\, 4.222782\, -1.476109)" +ISR,Gaza Strip,"BBOX(34.216663\, 34.558891\, 31.596100\, 31.216545)" +IRQ,Iraq,"BBOX(38.794700\, 48.560691\, 37.383673\, 29.061664)" +ISR,Israel,"BBOX(34.267582\, 35.681109\, 33.270273\, 29.486709)" +JOR,Jordan,"BBOX(34.960418\, 39.301109\, 33.377591\, 29.188891)" +KAZ,Kazakhstan,"BBOX(46.499163\, 87.348209\, 55.442627\, 40.594436)" +NOR,Norway,"BBOX(4.789582\, 31.073536\, 71.154709\, 57.987918)" +RUS,Russia,"BBOX(-180.000000\, 180.000000\, 81.851927\, 41.196582)" +SWE,Sweden,"BBOX(11.113336\, 24.167009\, 69.060300\, 55.339164)" +ISR,West Bank,"BBOX(34.888191\, 35.570609\, 32.546391\, 31.350691)" +DZA,Algeria,"BBOX(-8.667218\, 11.986473\, 37.089854\, 18.976391)" +AND,Andorra,"BBOX(1.421391\, 1.781718\, 42.655964\, 42.436382)" +CMR,Cameroon,"BBOX(8.502363\, 16.207000\, 13.085000\, 1.654164)" +CAF,Central African Republic,"BBOX(14.418891\, 27.459718\, 11.000836\, 2.221264)" +LBY,Libya,"BBOX(9.311391\, 25.151663\, 33.171136\, 19.499064)" +MCO,Monaco,"BBOX(7.390900\, 7.439291\, 43.768300\, 43.727545)" +TUN,Tunisia,"BBOX(7.492218\, 11.581663\, 37.340409\, 30.234391)" +BEN,Benin,"BBOX(0.776663\, 3.855000\, 12.396654\, 6.218718)" +TCD,Chad,"BBOX(13.461945\, 24.002745\, 23.450554\, 7.458536)" 
+GNQ,Equatorial Guinea,"BBOX(8.424163\, 11.353891\, 3.763336\, 0.930154)" +KIR,Kiribati,"BBOX(-157.581700\, 172.947509\, 2.033054\, 1.335991)" +NER,Niger,"BBOX(0.166663\, 15.996663\, 23.522309\, 11.693273)" +NGA,Nigeria,"BBOX(2.692500\, 14.649654\, 13.891500\, 4.272845)" +STP,Sao Tome & Principe,"BBOX(6.465136\, 7.463473\, 1.701245\, 0.018336)" +TGO,Togo,"BBOX(-0.149764\, 1.797800\, 11.138536\, 6.100545)" +ALB,Albania,"BBOX(19.288536\, 21.053327\, 42.660345\, 39.645000)" +BIH,Bosnia & Herzegovina,"BBOX(15.740591\, 19.619782\, 45.265945\, 42.565827)" +HRV,Croatia,"BBOX(13.504791\, 19.425000\, 46.535827\, 42.399991)" +ITA,Italy,"BBOX(6.623963\, 18.514445\, 47.094582\, 36.649164)" +MKD,Macedonia,"BBOX(20.458818\, 23.030973\, 42.358954\, 40.855891)" +MLT,Malta,"BBOX(14.329100\, 14.570000\, 35.991936\, 35.800000)" +SMR,San Marino,"BBOX(12.406945\, 12.511109\, 43.986873\, 43.898682)" +SMN,Serbia & Montenegro,"BBOX(18.453327\, 23.005000\, 46.181109\, 41.849000)" +VTC,Vatican City,"BBOX(12.444473\, 12.457718\, 41.908391\, 41.900891)" +BGR,Bulgaria,"BBOX(22.365273\, 28.605136\, 44.224718\, 41.243045)" +CYP,Cyprus,"BBOX(32.269863\, 34.586036\, 35.688609\, 34.640273)" +EGY,Egypt,"BBOX(24.706800\, 36.895827\, 31.646945\, 21.994164)" +GEO,Georgia,"BBOX(40.002963\, 46.710818\, 43.584718\, 41.048045)" +GRC,Greece,"BBOX(19.640000\, 28.238045\, 41.747773\, 34.930545)" +LBN,Lebanon,"BBOX(35.100827\, 36.623745\, 34.647500\, 33.062082)" +SYR,Syria,"BBOX(35.614463\, 42.378327\, 37.290545\, 32.313609)" +TUR,Turkey,"BBOX(25.665827\, 44.820545\, 42.109991\, 35.818445)" +AUT,Austria,"BBOX(9.533573\, 17.166382\, 49.018745\, 46.407491)" +CZE,Czech Republic,"BBOX(12.093700\, 18.852218\, 51.052491\, 48.581382)" +DNK,Denmark,"BBOX(8.092918\, 15.149163\, 57.745973\, 54.561936)" +HUN,Hungary,"BBOX(16.111800\, 22.894800\, 48.576173\, 45.748327)" +POL,Poland,"BBOX(14.147636\, 24.143473\, 54.836036\, 49.002918)" +SVK,Slovakia,"BBOX(16.844718\, 22.558054\, 49.600827\, 47.737500)" +SVN,Slovenia,"BBOX(13.383473\, 16.607873\, 46.876245\, 45.425818)" +SJM,Svalbard,"BBOX(10.487918\, 33.637500\, 80.764163\, 74.343045)" +BEL,Belgium,"BBOX(2.541663\, 6.398200\, 51.501245\, 49.508882)" +FRA,France,"BBOX(-4.790282\, 9.562218\, 51.091109\, 41.364927)" +DEU,Germany,"BBOX(5.865000\, 15.033818\, 55.056527\, 47.274718)" +LIE,Liechtenstein,"BBOX(9.474636\, 9.633891\, 47.274545\, 47.057454)" +LUX,Luxembourg,"BBOX(5.734445\, 6.524027\, 50.181809\, 49.448464)" +NLD,Netherlands,"BBOX(3.370863\, 7.210973\, 53.465827\, 50.753882)" +CHE,Switzerland,"BBOX(5.967009\, 10.488209\, 47.806664\, 45.829436)" +USA,United States,"BBOX(-178.216555\, 179.775936\, 71.351436\, 18.925482)" +BLR,Belarus,"BBOX(23.165400\, 32.740054\, 56.167491\, 51.251845)" +EST,Estonia,"BBOX(21.837354\, 28.194091\, 59.664718\, 57.522636)" +FIN,Finland,"BBOX(19.511391\, 31.581963\, 70.088609\, 59.806800)" +LVA,Latvia,"BBOX(20.968609\, 28.235963\, 58.083254\, 55.674836)" +LTU,Lithuania,"BBOX(20.942836\, 26.813054\, 56.449854\, 53.890336)" +MDA,Moldova,"BBOX(26.634991\, 30.128709\, 48.468318\, 45.448645)" +ROM,Romania,"BBOX(20.261027\, 29.672218\, 48.263882\, 43.623309)" +UKR,Ukraine,"BBOX(22.151445\, 40.178745\, 52.378600\, 44.379154)" +IND,India,"BBOX(68.144227\, 97.380536\, 35.505618\, 6.745827)" +MDV,Maldives,"BBOX(72.863391\, 73.637272\, 7.027773\, -0.641664)" +OMN,Oman,"BBOX(51.999291\, 59.847082\, 26.368709\, 16.642782)" +SOM,Somalia,"BBOX(40.988609\, 51.411318\, 11.979164\, -1.674873)" +LKA,Sri Lanka,"BBOX(79.696091\, 81.891663\, 9.828191\, 5.918054)" 
+TKM,Turkmenistan,"BBOX(51.250182\, 66.670882\, 42.796173\, 35.145991)" +UZB,Uzbekistan,"BBOX(55.997491\, 73.167545\, 45.570591\, 37.184991)" +YEM,Yemen,"BBOX(42.555973\, 54.473473\, 18.999345\, 12.144718)" +ARM,Armenia,"BBOX(43.454163\, 46.620536\, 41.297054\, 38.841145)" +AZE,Azerbaijan,"BBOX(44.778863\, 51.677009\, 42.710754\, 38.262809)" +BHR,Bahrain,"BBOX(50.453327\, 50.796391\, 26.288891\, 25.571945)" +IRN,Iran,"BBOX(44.034954\, 63.330273\, 39.779154\, 25.075973)" +KWT,Kuwait,"BBOX(46.546945\, 48.416591\, 30.084164\, 28.538882)" +QAT,Qatar,"BBOX(50.751936\, 51.615827\, 26.152500\, 24.556045)" +SAU,Saudi Arabia,"BBOX(34.572145\, 55.666109\, 32.154945\, 16.377500)" +ARE,United Arab Emirates,"BBOX(51.583327\, 56.381663\, 26.083882\, 22.633327)" +AFG,Afghanistan,"BBOX(60.504163\, 74.915736\, 38.471982\, 29.406109)" +KGZ,Kyrgyzstan,"BBOX(69.249500\, 80.281582\, 43.216900\, 39.195473)" +NPL,Nepal,"BBOX(80.052200\, 88.194554\, 30.424718\, 26.368364)" +PAK,Pakistan,"BBOX(60.866300\, 77.823927\, 37.060791\, 23.688045)" +TJK,Tajikistan,"BBOX(67.364700\, 75.187482\, 41.049254\, 36.671845)" +BGD,Bangladesh,"BBOX(88.043872\, 92.669345\, 26.626136\, 20.744818)" +BTN,Bhutan,"BBOX(88.751936\, 92.114218\, 28.325000\, 26.703609)" +BRN,Brunei,"BBOX(114.095082\, 115.360263\, 5.053054\, 4.018191)" +CHN,China,"BBOX(73.620045\, 134.768463\, 53.553745\, 18.168882)" +JPN,Japan,"BBOX(123.678863\, 145.812409\, 45.486382\, 24.251391)" +PRK,North Korea,"BBOX(124.323954\, 130.697418\, 43.006100\, 37.671382)" +PLW,Palau,"BBOX(134.452482\, 134.658872\, 7.729445\, 7.305254)" +PHL,Philippines,"BBOX(116.950000\, 126.598036\, 19.391109\, 5.049164)" +KOR,South Korea,"BBOX(126.099018\, 129.586872\, 38.625245\, 33.192209)" +KHM,Cambodia,"BBOX(102.346509\, 107.636382\, 14.708618\, 10.422736)" +LAO,Laos,"BBOX(100.091372\, 107.695254\, 22.499927\, 13.926664)" +MYS,Malaysia,"BBOX(99.641936\, 119.275818\, 7.352918\, 0.852782)" +MMR,Myanmar,"BBOX(92.204991\, 101.169427\, 28.546527\, 9.839582)" +SGP,Singapore,"BBOX(103.640945\, 103.997945\, 1.445282\, 1.259027)" +THA,Thailand,"BBOX(97.347272\, 105.639291\, 20.454582\, 5.633473)" +VNM,Vietnam,"BBOX(102.140745\, 109.464845\, 23.324164\, 8.559236)" +GUM,Guam,"BBOX(144.634154\, 144.953309\, 13.652291\, 13.235000)" +MHL,Marshall Is.,"BBOX(162.324963\, 171.378063\, 14.594027\, 5.600273)" +FSM,Micronesia,"BBOX(158.120100\, 163.042891\, 6.977636\, 5.261664)" +MNP,Northern Mariana Is.,"BBOX(145.572682\, 145.818082\, 15.268191\, 14.908054)" +UMI,Wake I.,"BBOX(166.608981\, 166.662200\, 19.324582\, 19.279445)" +BWA,Botswana,"BBOX(19.996109\, 29.373618\, -17.782082\, -26.875555)" +BDI,Burundi,"BBOX(28.985000\, 30.853191\, -2.301564\, -4.448055)" +ATF,French Southern & Antarctic Lands,"BBOX(51.650836\, 70.567491\, -46.327645\, -49.725009)" +HMD,Heard I. 
& McDonald Is.,"BBOX(73.234709\, 73.773882\, -52.965145\, -53.199445)" +KEN,Kenya,"BBOX(33.907218\, 41.905163\, 4.622500\, -4.669618)" +RWA,Rwanda,"BBOX(28.854445\, 30.893263\, -1.054446\, -2.825491)" +TZA,Tanzania,"BBOX(29.340827\, 40.436809\, -0.997218\, -11.740418)" +ZMB,Zambia,"BBOX(21.996391\, 33.702282\, -8.191664\, -18.074918)" +ZWE,Zimbabwe,"BBOX(25.237918\, 33.071591\, -15.616527\, -22.414764)" +ATA,Antarctica,"BBOX(-180.000000\, 180.000000\, -60.503336\, -90.000000)" +NOR,Bouvet I.,"BBOX(3.342363\, 3.484163\, -54.383609\, -54.462782)" +COM,Comoros,"BBOX(43.214027\, 44.530418\, -11.366946\, -12.383055)" +REU,Juan De Nova I.,"BBOX(42.723818\, 42.760900\, -17.052018\, -17.076118)" +LSO,Lesotho,"BBOX(27.013973\, 29.455554\, -28.570691\, -30.650527)" +MWI,Malawi,"BBOX(32.681873\, 35.920963\, -9.376673\, -17.135282)" +MOZ,Mozambique,"BBOX(30.213018\, 40.846109\, -10.471109\, -26.860282)" +ZAF,South Africa,"BBOX(16.483327\, 37.892218\, -22.136391\, -46.969727)" +SWZ,Swaziland,"BBOX(30.798336\, 32.133400\, -25.728336\, -27.316391)" +AGO,Angola,"BBOX(11.731245\, 24.084445\, -4.388991\, -18.016391)" +COG,Congo,"BBOX(11.140663\, 18.643609\, 3.711109\, -5.015000)" +ZAR,Congo\, DRC,"BBOX(12.214554\, 31.302773\, 5.380691\, -13.458055)" +FJI,Fiji,"BBOX(-180.000000\, 180.000000\, -16.153473\, -19.162782)" +GAB,Gabon,"BBOX(8.700836\, 14.519582\, 2.317900\, -3.925282)" +NAM,Namibia,"BBOX(11.716391\, 25.264427\, -16.954173\, -28.961873)" +NZL,New Zealand,"BBOX(-176.848755\, 178.841063\, -34.414718\, -52.578055)" +IOT,British Indian Ocean Territory,"BBOX(72.357900\, 72.494282\, -7.233473\, -7.436246)" +REU,Glorioso Is.,"BBOX(47.279091\, 47.303054\, -11.554100\, -11.577782)" +MDG,Madagascar,"BBOX(43.236827\, 50.501391\, -11.945555\, -25.588336)" +MUS,Mauritius,"BBOX(57.306309\, 63.495754\, -19.673336\, -20.520555)" +MYT,Mayotte,"BBOX(45.039163\, 45.293345\, -12.662500\, -12.992500)" +REU,Reunion,"BBOX(55.220554\, 55.853054\, -20.856527\, -21.373891)" +SYC,Seychelles,"BBOX(46.205691\, 55.540554\, -4.551664\, -9.463055)" +CXR,Christmas I.,"BBOX(105.629000\, 105.751900\, -10.384082\, -10.510973)" +CCK,Cocos Is.,"BBOX(96.817491\, 96.864845\, -12.130418\, -12.199446)" +IDN,Indonesia,"BBOX(95.210945\, 141.007018\, 5.913473\, -10.929655)" +TLS,Timor Leste,"BBOX(124.046100\, 127.308591\, -8.140000\, -9.463627)" +AUS,Australia,"BBOX(112.907209\, 158.960372\, -10.135691\, -54.753891)" +NRU,Nauru,"BBOX(166.904418\, 166.957045\, -0.493336\, -0.552218)" +NCL,New Caledonia,"BBOX(163.982745\, 168.130509\, -20.087918\, -22.673891)" +NFK,Norfolk I.,"BBOX(167.910945\, 167.998872\, -29.000555\, -29.081109)" +PNG,Papua New Guinea,"BBOX(140.858854\, 155.966845\, -1.355282\, -11.642500)" +SLB,Solomon Is.,"BBOX(155.671300\, 166.931836\, -6.605518\, -11.845836)" +TUV,Tuvalu,"BBOX(176.295254\, 179.232281\, -6.089446\, -8.561291)" +VUT,Vanuatu,"BBOX(166.521636\, 169.893863\, -13.707218\, -20.254173)" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv new file mode 100644 index 0000000000000..aa540d40ad604 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/countries_bbox_web.csv @@ -0,0 +1,249 @@ +id:keyword,name:keyword,shape:cartesian_shape +FLK, Falkland Is., "BBOX(-6806970.344651548\, -6426830.424971599\, -6665538.61144021\, -6862393.473674134)" +GUF, French Guiana, "BBOX(-6078465.2067807885\, -5749435.182262659\, 641770.3972926841\, 235324.11002137093)" +GUY, Guyana, 
"BBOX(-6833873.148626795\, -6286282.4434172455\, 953676.0708782381\, 132131.5480264357)" +PCN, Pitcairn Is., "BBOX(-1.448322847021477E7\, -1.428074532961791E7\, -2793163.209148463\, -2885847.742584221)" +SGS, South Georgia & the South Sandwich Is., "BBOX(-4232785.044058981\, -2921178.2834205604\, -7168210.949791082\, -8072797.261021951)" +SHN, St. Helena, "BBOX(-644849.5424266771\, -628429.9175369549\, -1793579.7338931332\, -1807264.3754193506)" +SUR, Suriname, "BBOX(-6464478.676752644\, -6009707.164829022\, 669343.5434865113\, 204444.85915446977)" +TTO, Trinidad & Tobago, "BBOX(-6893080.980145244\, -6737148.644965401\, 1271316.8958092\, 1123450.7268402777)" +VEN, Venezuela, "BBOX(-8168408.718739186\, -6657245.629555437\, 1368193.4618250781\, 72266.15206230174)" +ASM, American Samoa, "BBOX(-1.901595464265674E7\, -1.8986860848464E7\, -1603409.0061145446\, -1617338.5456514952)" +COK, Cook Is., "BBOX(-1.8462153311737206E7\, -1.755550270221884E7\, -1218650.60324631\, -2504423.3700605934)" +PYF, French Polynesia, "BBOX(-1.686465494432737E7\, -1.5452231241588091E7\, -981029.2116948966\, -2022435.6471389162)" +UMI, Jarvis I., "BBOX(-1.781614615792593E7\, -1.7812172052105166E7\, -41668.183668037316\, -44311.636360225275)" +NIU, Niue, "BBOX(-1.8918996368064713E7\, -1.889999624605927E7\, -2150619.798091522\, -2172080.175292089)" +WSM, Samoa, "BBOX(-1.9233784622210693E7\, -1.9083411248441823E7\, -1512399.049561015\, -1580814.199108954)" +TKL, Tokelau, "BBOX(-1.913167025144482E7\, -1.912956030181662E7\, -1025256.50252298\, -1030699.159919998)" +TON, Tonga, "BBOX(-1.9520985902791113E7\, -1.9359219424419094E7\, -2104146.80131666\, -2423871.209298853)" +WLF, Wallis & Futuna, "BBOX(-1.98360504519132E7\, -1.9605804230316367E7\, -1484290.6690231054\, -1611402.1249494848)" +ARG, Argentina, "BBOX(-8191144.166257678\, -5972291.682103194\, -2485194.106818803\, -7371901.253043402)" +BOL, Bolivia, "BBOX(-7754091.711639628\, -6403221.564728467\, -1082644.4605265881\, -2620063.8163838163)" +BRA, Brazil, "BBOX(-8238153.385337716\, -3873129.9144329783\, 587785.5079629741\, -3994093.243498929)" +CHL, Chile, "BBOX(-1.2183485121489162E7\, -7393910.374780716\, -1979723.0325789037\, -7538976.386388264)" +ECU, Ecuador, "BBOX(-1.0203977668829728E7\, -8373100.994630531\, 160069.96058917182\, -557339.7863215066)" +PRY, Paraguay, "BBOX(-6973472.910758704\, -6038403.325800699\, -2189911.7242244524\, -3196717.5348766074)" +PER, Peru, "BBOX(-9056413.424871765\, -7644744.579599449\, -4104.683866786337\, -2078385.864447083)" +URY, Uruguay, "BBOX(-6505356.195641661\, -5910875.717165678\, -3515982.318158614\, -4156248.8527274607)" +UMI, Baker I., "BBOX(-1.964428949334857E7\, -1.9642975923357394E7\, 24776.775336047573\, 23965.139003268785)" +CAN, Canada, "BBOX(-1.5696381156263582E7\, -5857338.166548977\, 1.7926778413967136E7\, 5112502.227274475)" +GTM, Guatemala, "BBOX(-1.0268864798128676E7\, -9820019.490616102\, 2016620.2477192462\, 1545072.9951440636)" +UMI, Howland I., "BBOX(-1.966381793765724E7\, -1.9662483105643325E7\, 90016.93033465231\, 87976.57940884378)" +UMI, Johnston Atoll, "BBOX(-1.8872988022526257E7\, -1.8871317228289172E7\, 1889449.6904405674\, 1888739.592498257)" +MEX, Mexico, "BBOX(-1.3180691242448486E7\, -9655698.786528189\, 3857992.7910224693\, 1637455.8925958527)" +UMI, Midway Is., "BBOX(-1.9747615131493594E7\, -1.974368555346914E7\, 3276930.956339718\, 3272211.297114333)" +BRB, Barbados, "BBOX(-6641259.148804331\, -6615392.506649243\, 1498269.4980028346\, 1465508.5364990495)" +DMA, Dominica, 
"BBOX(-6845190.333337227\, -6818396.733782433\, 1762138.8493679555\, 1712035.77580254)" +GRD, Grenada, "BBOX(-6877894.997852321\, -6856878.879868893\, 1372710.0161931934\, 1345360.731534649)" +GLP, Guadeloupe, "BBOX(-6879111.38592805\, -6811314.810418132\, 1864198.7087877272\, 1789672.9198651556)" +MTQ, Martinique, "BBOX(-6816263.407061167\, -6770111.459379609\, 1675390.1030315096\, 1620466.564996925)" +LCA, St. Lucia, "BBOX(-6799347.965159521\, -6776915.084016965\, 1586760.2747788534\, 1540902.846138527)" +SPM, St. Pierre & Miquelon, "BBOX(-6278172.373236121\, -6250088.469463722\, 5964272.744483719\, 5900906.394026551)" +VCT, St. Vincent & the Grenadines, "BBOX(-6821674.647507875\, -6803878.668434177\, 1503545.1028787405\, 1474620.605161206)" +ABW, Aruba, "BBOX(-7799006.120542209\, -7778434.278646477\, 1417237.7724451458\, 1392531.3743975367)" +BMU, Bermuda, "BBOX(-7216070.475135298\, -7199789.443011595\, 3813230.825275473\, 3797561.1925476543)" +DOM, Dominican Republic, "BBOX(-8015344.418919742\, -7605673.442087284\, 2264838.2331280783\, 1991268.1942175906)" +HTI, Haiti, "BBOX(-8289716.573465983\, -7973724.065068766\, 2283868.061303094\, 2040215.3097965734)" +JAM, Jamaica, "BBOX(-8724542.638268478\, -8484896.042272912\, 2098797.886578782\, 2002138.6713165536)" +ANT, Netherlands Antilles, "BBOX(-7699258.7361087445\, -7591201.908286172\, 1389429.1415046235\, 1348047.674912462)" +BHS, The Bahamas, "BBOX(-8791890.930189032\, -8097256.305860282\, 3114624.5106054945\, 2381778.6607825435)" +TCA, Turks & Caicos Is., "BBOX(-8018505.892457832\, -7917885.206619215\, 2506456.133236025\, 2443216.1674464582)" +BLZ, Belize, "BBOX(-9931524.217026532\, -9771579.370801603\, 2094970.9791089285\, 1791970.7485571986)" +CYM, Cayman Is., "BBOX(-9061499.6124054\, -9027238.590089742\, 2196677.690165189\, 2186160.351965059)" +COL, Colombia, "BBOX(-9097045.039005652\, -7443984.998678304\, 1412960.1248500098\, -472076.97756910085)" +CRI, Costa Rica, "BBOX(-9563612.298130559\, -9190693.005900422\, 1256252.842749445\, 896349.8334170822)" +CUB, Cuba, "BBOX(-9456916.57372173\, -8252253.557317591\, 2655499.846135876\, 2251949.753820664)" +SLV, El Salvador, "BBOX(-1.0030783799451409E7\, -9762126.342283737\, 1623823.8238794443\, 1477605.2302434247)" +HND, Honduras, "BBOX(-9946451.158864416\, -9254195.76601206\, 1855249.5859095547\, 1458038.3723417278)" +NIC, Nicaragua, "BBOX(-9761586.888031427\, -9254195.76601206\, 1691760.81737009\, 1199200.9443015517)" +PAN, Panama, "BBOX(-9242889.713250706\, -8593679.45241179\, 1075976.1383535631\, 804303.6245583462)" +AIA, Anguilla, "BBOX(-7031805.325801677\, -7010089.898777183\, 2069525.485454939\, 2056805.549131826)" +ATG, Antigua & Barbuda, "BBOX(-6889686.737551939\, -6864733.02654072\, 2005303.4210994085\, 1919628.1877410556)" +VGB, British Virgin Is., "BBOX(-7202202.070335221\, -7160573.590161418\, 2096726.335695059\, 2082531.6290789556)" +MSR, Montserrat, "BBOX(-6928185.136284053\, -6917269.703615838\, 1898992.8327456792\, 1882606.3105989075)" +PRI, Puerto Rico, "BBOX(-7488061.394454311\, -7269287.202979579\, 2098439.2297828426\, 2028446.302847273)" +KNA, St. 
Kitts & Nevis, "BBOX(-6997852.881114455\, -6971105.813106805\, 1968620.0064461157\, 1945153.7466145495)" +VIR, Virgin Is., "BBOX(-7238383.9104642505\, -7187072.749663104\, 2082975.2861753216\, 1999737.0895242055)" +FRO, Faroe Is., "BBOX(-827490.42907036\, -711300.1539736006\, 8944413.838654397\, 8715539.142798016)" +GRL, Greenland, "BBOX(-8132290.553358883\, -1353381.9599010698\, 1.841838614386466E7\, 8353191.775986784)" +XGK, Guernsey, "BBOX(-297068.19496499473\, -278407.0408089712\, 6361534.846607885\, 6346855.715083607)" +ISL, Iceland, "BBOX(-2731602.192501422\, -1502751.454502109\, 1.0025136653899286E7\, 9196525.03584683)" +IRL, Ireland, "BBOX(-1166041.2756762397\, -669370.2206187705\, 7435966.643781227\, 6700487.126114637)" +XIM, Isle of Man, "BBOX(-532903.6568742928\, -479640.2861633771\, 7249411.799394163\, 7180682.877256964)" +SJM, Jan Mayen, "BBOX(-1015223.6258196725\, -882597.5845070281\, 1.1464383304063711E7\, 1.1335539300648466E7)" +XJE, Jersey, "BBOX(-250175.41607230977\, -224308.77391722222\, 6319282.822387621\, 6303377.056271344)" +GBR, United Kingdom, "BBOX(-909665.4752870986\, 194747.32654372943\, 8589937.148187652\, 6438533.511709376)" +CPV, Cape Verde, "BBOX(-2823124.068441826\, -2523179.7117936057\, 1943228.8819694468\, 1667440.6983404886)" +CIV, Cote d'Ivoire, "BBOX(-958058.0616790326\, -276938.62540612154\, 1202097.1729137793\, 484115.97315150854)" +GHA, Ghana, "BBOX(-361664.8917125052\, 133893.0797566771\, 1249767.3181259448\, 526814.3511759888)" +GIB, Gibraltar, "BBOX(-596246.4508776823\, -593834.8254294725\, 4323115.767768943\, 4316053.421468498)" +LBR, Liberia, "BBOX(-1279319.9894917065\, -820246.5358469777\, 951144.4190395237\, 483992.16413836647)" +MAR, Morocco, "BBOX(-1466630.283495554\, -112634.06264437255\, 4289504.155676036\, 3206707.2043454945)" +PRT, Portugal, "BBOX(-3483078.5525721395\, -689118.2982827483\, 5183576.317394064\, 3847286.4078652565)" +ESP, Spain, "BBOX(-2022660.0079814764\, 480560.1191156738\, 5429039.221465501\, 3203347.2301618545)" +ESH, Western Sahara, "BBOX(-1903733.2771624175\, -964738.2330011163\, 3207048.827624554\, 2363772.158427126)" +BFA, Burkina Faso, "BBOX(-614576.7635071143\, 266936.0125622843\, 1698741.2811715933\, 1050643.0120585556)" +GIN, Guinea, "BBOX(-1678791.0953426699\, -851969.5850923934\, 1422911.1290510038\, 802936.7522689679)" +GNB, Guinea-Bissau, "BBOX(-1861013.9772984823\, -1518830.9983475052\, 1423734.7230846898\, 1223613.9918118552)" +MLI, Mali, "BBOX(-1363089.019496892\, 473262.6812172274\, 2875778.1558879707\, 1134962.1365298633)" +MRT, Mauritania, "BBOX(-1900842.0873479373\, -535013.6065024948\, 3159807.24053085\, 1657600.8186799039)" +SEN, Senegal, "BBOX(-1951740.3641577882\, -1265694.4838205066\, 1884840.6777415504\, 1380068.3247828495)" +SLE, Sierra Leone, "BBOX(-1480060.423460439\, -1142617.6510657615\, 1118607.3838558097\, 772615.2434245716)" +GMB, The Gambia, "BBOX(-1872579.0705148762\, -1536054.1273216614\, 1554306.33090056\, 1466584.8753009895)" +DJI, Djibouti, "BBOX(4648685.682234346\, 4833537.819242839\, 1426428.7393574219\, 1225554.7892715929)" +ERI, Eritrea, "BBOX(4056847.594510955\, 4800250.285874032\, 2036949.5002702742\, 1387149.8027029647)" +ETH, Ethiopia, "BBOX(3672630.3758422886\, 5342026.99671924\, 1675790.1336981696\, 379451.74027328007)" +MNG, Mongolia, "BBOX(9769520.962097632\, 1.3350714510090472E7\, 6825981.925445475\, 5099261.916823782)" +SDN, Sudan, "BBOX(2430004.2961371886\, 4297767.240203056\, 2539428.7064047027\, 389123.6754710965)" +UGA, Uganda, "BBOX(3292196.0161092333\, 
3897263.9800336002\, 470504.09041435266\, -164337.88255462408)" +ISR, Gaza Strip, "BBOX(3808981.5012748297\, 3847078.1479647276\, 3710408.4677697835\, 3660903.6805555364)" +IRQ, Iraq, "BBOX(4318606.2488766555\, 5405751.393937016\, 4492721.642260634\, 3383496.8234396563)" +ISR, Israel, "BBOX(3814649.7784257433\, 3972002.8842663835\, 3931233.3769460395\, 3437740.2376509146)" +JOR, Jordan, "BBOX(3891775.929138256\, 4374979.440881939\, 3945530.7721081185\, 3399709.663800458)" +KAZ, Kazakhstan, "BBOX(5176263.146752886\, 9723558.146230904\, 7448249.257062752\, 4952703.862043582)" +NOR, Norway, "BBOX(533173.8292784104\, 3459090.2041849457\, 1.1455379410923388E7\, 7964779.911100031)" +RUS, Russia, "BBOX(-2.003750834E7\, 2.003750834E7\, 1.6850434409817755E7\, 5041380.846897432)" +SWE, Sweden, "BBOX(1237130.9043623458\, 2690259.1355019724\, 1.0769543191624273E7\, 7427971.135671626)" +ISR, West Bank, "BBOX(3883735.6562778493\, 3959702.080535439\, 3835248.5789866336\, 3678377.284759022)" +DZA, Algeria, "BBOX(-964830.2942199894\, 1334328.0705815821\, 4451638.686907341\, 2152156.534692522)" +AND, Andorra, "BBOX(158228.52231611632\, 198339.94046960064\, 5259751.808527718\, 5226573.156424563)" +CMR, Cameroon, "BBOX(946478.719567819\, 1804154.9870354445\, 1469444.988943757\, 184166.28005485257)" +CAF, Central African Republic, "BBOX(1605103.603700283\, 3056801.8246613783\, 1232201.6067875316\, 247331.9412217624)" +LBY, Libya, "BBOX(1036539.304552783\, 2799870.317374274\, 3918041.4975678376\, 2213781.647695001)" +MCO, Monaco, "BBOX(822751.2243894777\, 828138.0858677052\, 5429655.8071539095\, 5423375.498489419)" +TUN, Tunisia, "BBOX(834029.8925561006\, 1289264.82751983\, 4486662.225217784\, 3533714.341264127)" +BEN, Benin, "BBOX(86457.72966594121\, 429136.6369483333\, 1390883.792858654\, 693627.7186615759)" +TCD, Chad, "BBOX(1498576.8622784517\, 2671973.3506688518\, 2686597.2252112613\, 832635.3730826946)" +GNQ, Equatorial Guinea, "BBOX(937773.5353889967\, 1263909.364466394\, 419234.1992921709\, 103548.81812163288)" +KIR, Kiribati, "BBOX(-1.7541914599896543E7\, 1.9252428633165136E7\, 226366.04306531145\, 148735.3163895852)" +NER, Niger, "BBOX(18552.840291496777\, 1780740.379303719\, 2695306.478633392\, 1310820.5810745189)" +NGA, Nigeria, "BBOX(299727.7289191666\, 1630792.0233506353\, 1561771.5570046515\, 476092.4293577717)" +STP, Sao Tome & Principe, "BBOX(719695.6473290791\, 830830.0137936934\, 189409.56079307984\, 2041.1542177410504)" +TGO, Togo, "BBOX(-16671.65221684311\, 200130.18052028888\, 1247820.9113916112\, 680396.3710024672)" +ALB, Albania, "BBOX(2147190.0053688344\, 2343645.64081804\, 5260414.963633992\, 4814487.957249004)" +BIH, Bosnia & Herzegovina, "BBOX(1752234.5746612719\, 2184064.14141101\, 5663486.702317411\, 5246118.059706764)" +HRV, Croatia, "BBOX(1503346.4571803163\, 2162381.1083583334\, 5866635.618622956\, 5221085.75286942)" +ITA, Italy, "BBOX(737376.1880908412\, 2061018.5894331736\, 5957525.94908941\, 4390316.944679211)" +MKD, Macedonia, "BBOX(2277465.201675234\, 2563796.186476749\, 5214901.594868669\, 4991108.7995952675)" +MLT, Malta, "BBOX(1595108.1153038554\, 1621924.980632222\, 4299511.834205549\, 4273136.461790226)" +SMR, San Marino, "BBOX(1381134.799507896\, 1392730.2829452723\, 5463410.973754562\, 5449776.352704761)" +SMN, Serbia & Montenegro, "BBOX(2054214.9647958176\, 2560904.8853427777\, 5809419.7157107135\, 5138387.144313233)" +VTC, Vatican City, "BBOX(1385312.3973578045\, 1386786.8240131561\, 5147266.721875869\, 5146144.937762506)" +BGR, Bulgaria, "BBOX(2489690.801465982\, 
3184309.173149079\, 5500283.923251328\, 5048257.112102198)" +CYP, Cyprus, "BBOX(3592264.716628652\, 3850099.91554189\, 4257858.611081361\, 4115102.5028513763)" +EGY, Egypt, "BBOX(2750348.3947484\, 4107224.6734649837\, 3717055.3733837567\, 2510824.567439936)" +GEO, Georgia, "BBOX(4453109.470762285\, 5199824.4735734565\, 5401399.644378745\, 5019430.87461186)" +GRC, Greece, "BBOX(2186314.7988755554\, 3143444.7899599737\, 5123271.623236523\, 4154446.48763015)" +LBN, Lebanon, "BBOX(3907406.1875188733\, 4076936.6437751846\, 4116080.386414876\, 3903547.2121638493)" +SYR, Syria, "BBOX(3964583.8854840077\, 4717533.78165415\, 4479682.761680629\, 3804547.447187875)" +TUR, Turkey, "BBOX(2857106.79203054\, 4989400.245782474\, 5177469.827842194\, 4275668.354346954)" +AUT, Austria, "BBOX(1061272.4916527711\, 1910952.9027368103\, 6278042.62617315\, 5845892.142474166)" +CZE, Czech Republic, "BBOX(1346264.5256192111\, 2098619.3077916563\, 6630584.029505155\, 6204126.892396778)" +DNK, Denmark, "BBOX(900899.5106663116\, 1686397.1108695522\, 7914142.641677729\, 7277306.821832056)" +HUN, Hungary, "BBOX(1793557.3715133998\, 2548637.4774590665\, 6203250.422795402\, 5740109.762720737)" +POL, Poland, "BBOX(1574907.6352293568\, 2687639.1199670266\, 7330108.850656106\, 6275356.531185668)" +SVK, Slovakia, "BBOX(1875145.4300552672\, 2511151.0842176126\, 6377430.961535088\, 6063294.76382884)" +SVN, Slovenia, "BBOX(1489841.399198138\, 1848779.9652620046\, 5921897.448055978\, 5688808.783113411)" +SJM, Svalbard, "BBOX(1167509.6910790894\, 3744509.3710375\, 1.6048121551074298E7\, 1.2655555793739378E7)" +BEL, Belgium, "BBOX(282936.63088871894\, 712244.3658943777\, 6710441.719074484\, 6361653.309031685)" +FRA, France, "BBOX(-533251.7529219548\, 1064461.2384661005\, 6637425.700005567\, 5066318.240535327)" +DEU, Germany, "BBOX(652888.8134116667\, 1673556.9642057894\, 7372844.587967681\, 5987030.890923241)" +LIE, Liechtenstein, "BBOX(1054711.6548248013\, 1072439.8403286163\, 5987002.506696636\, 5951457.074129165)" +LUX, Luxembourg, "BBOX(638355.4972931738\, 726251.3634604733\, 6477821.694262034\, 6351301.791746342)" +NLD, Netherlands, "BBOX(375242.7526416523\, 802721.8423723045\, 7069632.465484033\, 6577873.226207013)" +CHE, Switzerland, "BBOX(664244.403346417\, 1167542.0850509058\, 6074750.670815664\, 5753058.221661312)" +USA, United States, "BBOX(-1.9838976150769826E7\, 2.001256564961837E7\, 1.1523520412740182E7\, 2146164.589200235)" +BLR, Belarus, "BBOX(2578760.5316635333\, 3644606.1393169463\, 7591830.885400406\, 6665963.6751351105)" +EST, Estonia, "BBOX(2430923.1272140685\, 3138551.853062327\, 8325466.382266233\, 7867699.765386352)" +FIN, Finland, "BBOX(2171998.1104861163\, 3515688.0389226186\, 1.1097617254588177E7\, 8356849.0793245975)" +LVA, Latvia, "BBOX(2334214.876198328\, 3143213.0227801744\, 7984826.971795753\, 7493955.154644284)" +LTU, Lithuania, "BBOX(2331345.838962512\, 2984815.5174770574\, 7648495.086573079\, 7149414.5404388225)" +MDA, Moldova, "BBOX(2964993.634990694\, 3353912.54367185\, 6185122.9269956285\, 5692430.167578349)" +ROM, Romania, "BBOX(2255447.2082748064\, 3303096.1980072116\, 6150868.213605207\, 5407332.237900151)" +UKR, Ukraine, "BBOX(2465887.5773919513\, 4472677.433490184\, 6868872.82154549\, 5524305.8506691335)" +IND, India, "BBOX(7585780.649085295\, 1.0840351679187058E7\, 4232806.675603967\, 752682.9865532124)" +MDV, Maldives, "BBOX(8111115.582462115\, 8197263.621304713\, 784297.2010665077\, -71431.20290758506)" +OMN, Oman, "BBOX(5788534.594925483\, 6662146.69277591\, 3044819.2631402686\, 
1879282.0779841878)" +SOM, Somalia, "BBOX(4562831.081569439\, 5723081.7399744\, 1343337.2289440092\, -186472.5685638059)" +LKA, Sri Lanka, "BBOX(8871728.267099438\, 9116138.224105384\, 1099474.3430723047\, 659969.3086218301)" +TKM, Turkmenistan, "BBOX(5705144.162508433\, 7421768.6339453105\, 5280998.579824433\, 4183738.4781891424)" +UZB, Uzbekistan, "BBOX(6233612.182953193\, 8144973.85086014\, 5711801.139928842\, 4464923.610179015)" +YEM, Yemen, "BBOX(4737309.24391286\, 6063959.275257026\, 2154858.799301538\, 1362182.6880406907)" +ARM, Armenia, "BBOX(4837295.297334552\, 5189774.327307057\, 5056256.290729958\, 4698942.432854185)" +AZE, Azerbaijan, "BBOX(4984760.226767874\, 5752658.326798638\, 5268048.77475221\, 4616618.723595905)" +BHR, Bahrain, "BBOX(5616438.669684706\, 5654628.379468894\, 3034905.550106453\, 2946160.3652355284)" +IRN, Iran, "BBOX(4901948.6557028685\, 7049893.741177648\, 4833901.247983729\, 2885079.0840316075)" +KWT, Kuwait, "BBOX(5181582.214661229\, 5389710.255315938\, 3514372.934498193\, 3317085.938189461)" +QAT, Qatar, "BBOX(5649679.671506368\, 5745847.577713873\, 3017981.013632691\, 2821312.488451719)" +SAU, Saudi Arabia, "BBOX(3848553.5764954956\, 6196722.907460272\, 3783666.794569951\, 1848481.0463722278)" +ARE, United Arab Emirates, "BBOX(5742229.694263595\, 6276378.014364274\, 3009473.8025495554\, 2587735.5585281393)" +AFG, Afghanistan, "BBOX(6735292.615095663\, 8339581.582762433\, 4646317.28372925\, 3427436.851842879)" +KGZ, Kyrgyzstan, "BBOX(7708819.076615721\, 8936904.82707441\, 5345044.727405903\, 4749710.205362992)" +NPL, Nepal, "BBOX(8911370.139640821\, 9817772.840653224\, 3558261.041954822\, 3044776.39805181)" +PAK, Pakistan, "BBOX(6775605.521527455\, 8663319.92396695\, 4447583.65883328\, 2715440.846640232)" +TJK, Tajikistan, "BBOX(7499004.100397766\, 8369832.209103333\, 5019609.3336218465\, 4393464.385496015)" +BGD, Bangladesh, "BBOX(9800998.997143846\, 1.0315904296110207E7\, 3076839.5287209633\, 2361476.7409209567)" +BTN, Bhutan, "BBOX(9879820.321061922\, 1.025410784115321E7\, 3290010.9896438504\, 3086490.161301852)" +BRN, Brunei, "BBOX(1.2701006428488798E7\, 1.2841845733150518E7\, 563234.0022074429\, 447670.0898939893)" +CHN, China, "BBOX(8195345.9204370845\, 1.5002356674063785E7\, 7086089.890077106\, 2057325.3856844143)" +JPN, Japan, "BBOX(1.3767868049134541E7\, 1.623176311896106E7\, 5698420.16133248\, 2784071.2548644035)" +PRK, North Korea, "BBOX(1.3839679250759868E7\, 1.4549170017730366E7\, 5312900.3745006835\, 4533106.558340659)" +PLW, Palau, "BBOX(1.4967181830048332E7\, 1.4990157059749957E7\, 863059.693444481\, 815429.4880146481)" +PHL, Philippines, "BBOX(1.3018814446461111E7\, 1.4092828900986778E7\, 2201037.2202695687\, 562799.2811739098)" +KOR, South Korea, "BBOX(1.4037278471337833E7\, 1.4425544602525068E7\, 4668132.414354527\, 3920844.3714562915)" +KHM, Cambodia, "BBOX(1.139316126476325E7\, 1.1982027233402364E7\, 1655642.1223870981\, 1166706.2324655629)" +LAO, Laos, "BBOX(1.1142120562289124E7\, 1.1988580834463434E7\, 2571654.2509495416\, 1565804.2404149454)" +MYS, Malaysia, "BBOX(1.1092089575631922E7\, 1.32777233218629E7\, 820779.1279511156\, 94934.7631846226)" +MMR, Myanmar, "BBOX(1.0264212645289583E7\, 1.126212909591956E7\, 3318054.720285839\, 1100761.292465509)" +SGP, Singapore, "BBOX(1.1537257221127674E7\, 1.157699827933534E7\, 160905.1210847127\, 140165.52511697204)" +THA, Thailand, "BBOX(1.0836648747645825E7\, 1.1759712080245482E7\, 2326960.8760532974\, 628128.2178646458)" +VNM, Vietnam, "BBOX(1.137025572106285E7\, 1.2185570803468373E7\, 
2671268.1479721097\, 956373.5794062541)" +GUM, Guam, "BBOX(1.610060037235469E7\, 1.613612854443387E7\, 1534354.7088998647\, 1486593.2644101644)" +MHL, Marshall Is., "BBOX(1.8069932221681617E7\, 1.9077718703641918E7\, 1642457.1731015244\, 624414.5801310536)" +FSM, Micronesia, "BBOX(1.76018490137313E7\, 1.8149851601056725E7\, 778674.0289479959\, 586550.7704269526)" +MNP, Northern Mariana Is., "BBOX(1.6205076831395375E7\, 1.6232394634432243E7\, 1720127.7032804906\, 1678605.9653024632)" +UMI, Wake I., "BBOX(1.854682692392445E7\, 1.8552751235904157E7\, 2193187.709933591\, 2187863.8226788775)" +BWA, Botswana, "BBOX(2225956.6714169392\, 3269856.198060967\, -2012057.3125287183\, -3107932.575048184)" +BDI, Burundi, "BBOX(3226595.4401938887\, 3434561.510989516\, -256277.86419111618\, -495653.34463959694)" +ATF, French Southern & Antarctic Lands, "BBOX(5749744.761766512\, 7855537.163585416\, -5833010.924598094\, -6398787.743617378)" +HMD, Heard I. & McDonald Is., "BBOX(8152450.513138738\, 8212470.976939865\, -6976553.288377103\, -7019975.393962887)" +KEN, Kenya, "BBOX(3774534.2414511004\, 4664861.406119774\, 515133.4762737857\, -520395.9201280237)" +RWA, Rwanda, "BBOX(3212062.1240753955\, 3439022.3056239635\, -117387.0182772328\, -314659.7811132031)" +TZA, Tanzania, "BBOX(3266205.9206388732\, 4501404.98655826\, -111015.40498408281\, -1316180.4208213643)" +ZMB, Zambia, "BBOX(2448627.045068894\, 3751720.8702890654\, -915014.476700008\, -2046319.4302683398)" +ZWE, Zimbabwe, "BBOX(2809472.180051312\, 3681512.6693309383\, -1760356.671722378\, -2561396.0054164226)" +ATA, Antarctica, "BBOX(-2.003750834E7\, 2.003750834E7\, -8512662.881033322\, -4.748140766343476E9)" +NOR, Bouvet I., "BBOX(372070.1471544857\, 387855.25094677455\, -7243144.612387524\, -7258293.454237509)" +COM, Comoros, "BBOX(4810563.480097139\, 4957103.455881589\, -1273745.795821429\, -1389333.8616461232)" +REU, Juan De Nova I., "BBOX(4755993.663842456\, 4760121.613199477\, -1926881.0822095312\, -1929687.4249448022)" +LSO, Lesotho, "BBOX(3007181.718244638\, 3278977.271857335\, -3321117.2692412077\, -3587446.106149188)" +MWI, Malawi, "BBOX(3638129.460024005\, 3998703.3094073967\, -1048497.2089351554\, -1936578.3607502843)" +MOZ, Mozambique, "BBOX(3363297.7786198338\, 4546968.054133605\, -1172181.8581435068\, -3106026.6491282047)" +ZAF, South Africa, "BBOX(1834915.5679635953\, 4218142.412200545\, -2527908.4975596936\, -5937134.146607068)" +SWZ, Swaziland, "BBOX(3428455.080322901\, 3577073.7249586442\, -2965472.9128583763\, -3163056.5390926218)" +AGO, Angola, "BBOX(1305916.2195893514\, 2681068.153065396\, -489058.770192694\, -2039467.1713562359)" +COG, Congo, "BBOX(1240172.93208683\, 2075397.0601399948\, 413407.92638141196\, -558981.4471095677)" +ZAR, Congo\, DRC, "BBOX(1359717.9313576685\, 3484608.750292371\, 599858.1461695591\, -1512112.8916449302)" +FJI, Fiji, "BBOX(-2.003750834E7\, 2.003750834E7\, -1822502.649701532\, -2174110.2636207)" +GAB, Gabon, "BBOX(968572.632860957\, 1616312.474546188\, 258097.85802697268\, -437302.607003333)" +NAM, Namibia, "BBOX(1304262.6798733384\, 2812423.14843234\, -1915491.159689654\, -3370794.2160844747)" +NZL, New Zealand, "BBOX(-1.9686713351283982E7\, 1.9908496063316472E7\, -4084625.39078185\, -6905327.726548656)" +IOT, British Indian Ocean Territory, "BBOX(8054844.581749367\, 8070026.5565406205\, -807374.1159864698\, -830132.9519243974)" +REU, Glorioso Is., "BBOX(5263084.334556216\, 5265751.883513724\, -1295003.534066991\, -1297694.4422191991)" +MDG, Madagascar, "BBOX(4813101.564486872\, 5621789.129689449\, 
-1339512.841638736\, -2948183.285092941)" +MUS, Mauritius, "BBOX(6379309.136233983\, 7068315.001831045\, -2234372.9783939887\, -2334800.8501905375)" +MYT, Mayotte, "BBOX(5013736.69021733\, 5042032.101022207\, -1421199.6616333937\, -1458875.4272419864)" +REU, Reunion, "BBOX(6147123.9517467795\, 6217533.529663724\, -2374779.1643490326\, -2436517.3438334884)" +SYC, Seychelles, "BBOX(5143593.993155349\, 6182746.188795668\, -507222.7096158059\, -1058244.6497234497)" +CXR, Christmas I., "BBOX(1.175856649136589E7\, 1.1772247656782478E7\, -1162331.3692172004\, -1176694.9418773586)" +CCK, Cocos Is., "BBOX(1.0777673796502084E7\, 1.0782945219668373E7\, -1360554.4203425802\, -1368415.0936628287)" +IDN, Indonesia, "BBOX(1.0598833913871005E7\, 1.5696829439852942E7\, 659456.6237303711\, -1224130.4157647756)" +TLS, Timor Leste, "BBOX(1.3808748684969299E7\, 1.4171927521756383E7\, -909204.3581778448\, -1058309.2029773812)" +AUS, Australia, "BBOX(1.2568773011020126E7\, 1.7695387664886124E7\, -1134231.265244234\, -7314247.137263005)" +NRU, Nauru, "BBOX(1.8579714820321366E7\, 1.8585573231162526E7\, -54918.590898148344\, -61473.57829423625)" +NCL, New Caledonia, "BBOX(1.8254475669742182E7\, 1.871620264608858E7\, -2283448.9342597914\, -2592628.621050228)" +NFK, Norfolk I., "BBOX(1.869176089341545E7\, 1.870154888228107E7\, -3375716.673702962\, -3385973.448600687)" +PNG, Papua New Guinea, "BBOX(1.5680335898821346E7\, 1.7362149763616595E7\, -150883.37308403326\, -1305049.300451269)" +SLB, Solomon Is., "BBOX(1.7329249844714675E7\, 1.858276697811951E7\, -736957.2370687899\, -1328168.5471204517)" +TUV, Tuvalu, "BBOX(1.962509790181899E7\, 1.9952046251859576E7\, -679153.8120624761\, -956604.9181074377)" +VUT, Vanuatu, "BBOX(1.8537103723002467E7\, 1.8912498315429542E7\, -1540647.6688226506\, -2303165.641357482)" diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json new file mode 100644 index 0000000000000..eb386b84ff70b --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox.json @@ -0,0 +1,13 @@ +{ + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "shape": { + "type": "geo_shape" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json new file mode 100644 index 0000000000000..303c828c84285 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-countries_bbox_web.json @@ -0,0 +1,13 @@ +{ + "properties": { + "id": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "shape": { + "type": "shape" + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index f3cb362c40e22..754d4a0e156cf 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -46,10 +46,10 @@ median_absolute_deviation|"double|integer|long|unsigned_long median_absolute_dev min |"double|integer|long|unsigned_long min(field:double|integer|long|unsigned_long)" |field |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "The minimum value of a numeric field." 
| false | false | true mv_avg |"double mv_avg(field:double|integer|long|unsigned_long)" |field |"double|integer|long|unsigned_long" | "" |double | "Converts a multivalued field into a single valued field containing the average of all of the values." | false | false | false mv_concat |"keyword mv_concat(v:text|keyword, delim:text|keyword)" |[v, delim] |["text|keyword", "text|keyword"] |["values to join", "delimiter"] |keyword | "Reduce a multivalued string field to a single valued field by concatenating all values." | [false, false] | false | false -mv_count |"integer mv_count(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" | integer | "Reduce a multivalued field to a single valued field containing the count of values." | false | false | false +mv_count |"integer mv_count(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" | integer | "Reduce a multivalued field to a single valued field containing the count of values." | false | false | false mv_dedupe |"boolean|date|double|integer|ip|keyword|long|text|version mv_dedupe(v:boolean|date|double|integer|ip|keyword|long|text|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|version" | "Remove duplicate values from a multivalued field." | false | false | false -mv_first |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the first value." | false | false | false -mv_last |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the last value." | false | false | false +mv_first |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the first value." 
| false | false | false +mv_last |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the last value." | false | false | false mv_max |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the maximum value." | false | false | false mv_median |"double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | "" |"double|integer|long|unsigned_long" | "Converts a multivalued field into a single valued field containing the median value." | false | false | false mv_min |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" |v | "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "" |"boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version" | "Reduce a multivalued field to a single valued field containing the minimum value." | false | false | false @@ -75,20 +75,22 @@ tau |double tau() to_bool |"boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | "Converts an input value to a boolean value." |false |false | false to_boolean |"boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | "Converts an input value to a boolean value." |false |false | false to_cartesianpoint |"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" |v |"cartesian_point|keyword|text" | |cartesian_point | "Converts an input value to a point value." |false |false | false +to_cartesianshape |"cartesian_shape to_cartesianshape(v:cartesian_shape|keyword|text)" |v |"cartesian_shape|keyword|text" | |cartesian_shape | "Converts an input value to a shape value." |false |false | false to_datetime |"date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | "Converts an input value to a date value." |false |false | false to_dbl |"double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | "Converts an input value to a double value." |false |false | false to_degrees |"double to_degrees(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | |double | "Converts a number in radians to degrees." 
|false |false | false to_double |"double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | "Converts an input value to a double value." |false |false | false to_dt |"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | "Converts an input value to a date value." |false |false | false to_geopoint |"geo_point to_geopoint(v:geo_point|keyword|text)" |v |"geo_point|keyword|text" | |geo_point | "Converts an input value to a geo_point value." |false |false | false +to_geoshape |"geo_shape to_geoshape(v:geo_shape|keyword|text)" |v |"geo_shape|keyword|text" | |geo_shape | "Converts an input value to a geo_shape value." |false |false | false to_int |"integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | "Converts an input value to an integer value." |false |false | false to_integer |"integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | "Converts an input value to an integer value." |false |false | false to_ip |"ip to_ip(v:ip|keyword|text)" |v |"ip|keyword|text" | |ip | "Converts an input string to an IP value." |false |false | false to_long |"long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |long | "Converts an input value to a long value." |false |false | false to_lower |"keyword|text to_lower(str:keyword|text)" |str |"keyword|text" | "The input string" |"keyword|text" | "Returns a new string representing the input string converted to lower case." |false |false | false to_radians |"double to_radians(v:double|integer|long|unsigned_long)" |v |"double|integer|long|unsigned_long" | |double | "Converts a number in degrees to radians." |false |false | false -to_str |"keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false -to_string |"keyword to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false +to_str |"keyword to_str(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." |false |false | false +to_string |"keyword to_string(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" |v |"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version" | |keyword | "Converts a field into a string." 
|false |false | false to_ul |"unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_ulong |"unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false to_unsigned_long |"unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |unsigned_long | "Converts an input value to an unsigned long value." |false |false | false @@ -139,10 +141,10 @@ double e() "double|integer|long|unsigned_long min(field:double|integer|long|unsigned_long)" "double mv_avg(field:double|integer|long|unsigned_long)" "keyword mv_concat(v:text|keyword, delim:text|keyword)" -"integer mv_count(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"integer mv_count(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "boolean|date|double|integer|ip|keyword|long|text|version mv_dedupe(v:boolean|date|double|integer|ip|keyword|long|text|version)" -"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" -"boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_first(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version mv_last(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_max(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" "double|integer|long|unsigned_long mv_median(v:double|integer|long|unsigned_long)" "boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version mv_min(v:boolean|date|double|integer|ip|keyword|long|text|unsigned_long|version)" @@ -167,21 +169,23 @@ double pi() double tau() "boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" "boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" -"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" +"cartesian_point to_cartesianpoint(v:cartesian_point|keyword|text)" +"cartesian_shape to_cartesianshape(v:cartesian_shape|keyword|text)" "date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "double to_degrees(v:double|integer|long|unsigned_long)" "double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" -"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" -"geo_point 
to_geopoint(v:geo_point|keyword|text)" +"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" +"geo_point to_geopoint(v:geo_point|keyword|text)" +"geo_shape to_geoshape(v:geo_shape|keyword|text)" "integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "ip to_ip(v:ip|keyword|text)" "long to_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "keyword|text to_lower(str:keyword|text)" "double to_radians(v:double|integer|long|unsigned_long)" -"keyword to_str(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" -"keyword to_string(v:boolean|cartesian_point|date|double|geo_point|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_str(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" +"keyword to_string(v:boolean|cartesian_point|cartesian_shape|date|double|geo_point|geo_shape|integer|ip|keyword|long|text|unsigned_long|version)" "unsigned_long to_ul(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_ulong(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "unsigned_long to_unsigned_long(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" @@ -212,5 +216,5 @@ countFunctions#[skip:-8.12.99] show functions | stats a = count(*), b = count(*), c = count(*) | mv_expand c; a:long | b:long | c:long -86 | 86 | 86 +88 | 88 | 88 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec new file mode 100644 index 0000000000000..71d7c0dbdcfdd --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/spatial_shapes.csv-spec @@ -0,0 +1,135 @@ +############################################### +# Tests for GEO_SHAPE type +# + +convertFromString#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" +| eval pt = to_geoshape(wkt); + +wkt:keyword |pt:geo_shape +"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" | POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10)) +; + +convertFromStringArray#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| eval pt = to_geoshape(wkt); + +wkt:keyword |pt:geo_shape +["POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10))", "POINT(75.8092915005895 22.727749187571)"] |[POLYGON ((30 10\, 40 40\, 20 40\, 10 20\, 30 10)), POINT(75.8092915005895 22.727749187571)] +; + +# need to work out how to upload WKT +simpleLoad#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +FROM countries_bbox | WHERE id == "ISL"; + +id:keyword| name:keyword| shape:geo_shape +ISL|Iceland|BBOX(-24.538400, -13.499446, 66.536100, 63.390000) +; + +geo_shapeEquals#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] + +ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geoshape(wkt) +| WHERE pt == to_geoshape("POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))") +; + +wkt:keyword |pt:geo_shape +"POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))" |POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10)) +; + +geo_shapeNotEquals#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] 
+ROW wkt = ["POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))", "POINT(75.8092915005895 22.727749187571)"] +| MV_EXPAND wkt +| EVAL pt = to_geoshape(wkt) +| WHERE pt != to_geoshape("POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))") +; + +wkt:keyword |pt:geo_shape +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +; + +convertFromStringParseError#[skip:-8.12.99, reason: spatial type geo_shape only added in 8.13] +row wkt = ["POINTX(42.97109630194 14.7552534413725)", "POINT(75.8092915005895 22.727749187571)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_geoshape(wkt) +; + +warning:Line 3:13: evaluation of [to_geoshape(wkt)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' + +wkt:keyword |pt:geo_shape +"POINTX(42.97109630194 14.7552534413725)" |null +"POINT(75.8092915005895 22.727749187571)" |POINT(75.8092915005895 22.727749187571) +"POINT(111)" |null +; + +############################################### +# Tests for CARTESIAN_SHAPE type +# + +convertCartesianShapeFromString#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] + +row wkt = "POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" +| mv_expand wkt +| eval pt = to_cartesianshape(wkt) +; + +wkt:keyword |pt:cartesian_shape +"POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" |POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97)) +; + +convertCartesianFromStringArray#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +row wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| eval pt = to_cartesianshape(wkt); + +wkt:keyword |pt:cartesian_shape +["POLYGON ((3339584.72 1118889.97\, 4452779.63 4865942.27\, 2226389.81 4865942.27\, 1113194.90 2273030.92\, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] |[POLYGON ((3339584.72 1118889.97\, 4452779.63 4865942.27\, 2226389.81 4865942.27\, 1113194.90 2273030.92\, 3339584.72 1118889.97)), POINT(7580.93 2272.77)] +; + +# need to work out how to upload WKT +simpleCartesianShapeLoad#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +FROM countries_bbox_web | WHERE id == "ISL"; + +id:keyword| name:keyword|shape:cartesian_shape +ISL|Iceland|BBOX(-2731602.192501422, -1502751.454502109, 1.0025136653899286E7, 9196525.03584683) +; + +cartesianShapeEquals#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianshape(wkt) +| WHERE pt == to_cartesianshape("POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))") +; + +wkt:keyword |pt:cartesian_shape +"POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))" |POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90
2273030.92, 3339584.72 1118889.97)) +; + +cartesianShapeNotEquals#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +ROW wkt = ["POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))", "POINT(7580.93 2272.77)"] +| MV_EXPAND wkt +| EVAL pt = to_cartesianshape(wkt) +| WHERE pt != to_cartesianshape("POLYGON ((3339584.72 1118889.97, 4452779.63 4865942.27, 2226389.81 4865942.27, 1113194.90 2273030.92, 3339584.72 1118889.97))") +; + +wkt:keyword |pt:cartesian_shape +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +; + +convertCartesianShapeFromStringParseError#[skip:-8.12.99, reason: spatial type cartesian_shape only added in 8.13] +row wkt = ["POINTX(4297.11 -1475.53)", "POINT(7580.93 2272.77)", "POINT(111)"] +| mv_expand wkt +| eval pt = to_cartesianshape(wkt) +; + +warning:Line 3:13: evaluation of [to_cartesianshape(wkt)] failed, treating result as null. Only first 20 failures recorded. +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: Unknown geometry type: pointx +warning:Line 3:13: java.lang.IllegalArgumentException: Failed to parse WKT: expected number but found: ')' + +wkt:keyword |pt:cartesian_shape +"POINTX(4297.11 -1475.53)" |null +"POINT(7580.93 2272.77)" |POINT(7580.93 2272.77) +"POINT(111)" |null +; diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java new file mode 100644 index 0000000000000..5ec9dcb94f67f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeFromStringEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToCartesianShape}. + * This class is generated. Do not edit it. 
+ */ +public final class ToCartesianShapeFromStringEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToCartesianShapeFromStringEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToCartesianShapeFromString"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToCartesianShape.fromKeyword(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch (IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToCartesianShape.fromKeyword(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToCartesianShapeFromStringEvaluator get(DriverContext context) { + return new ToCartesianShapeFromStringEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToCartesianShapeFromStringEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java new file mode 100644 index 0000000000000..68a6087d86953 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeFromStringEvaluator.java @@ -0,0 +1,125 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.IllegalArgumentException; +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToGeoShape}. + * This class is generated. Do not edit it. + */ +public final class ToGeoShapeFromStringEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToGeoShapeFromStringEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToGeoShapeFromString"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + try { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } catch (IllegalArgumentException e) { + registerException(e); + return driverContext.blockFactory().newConstantNullBlock(positionCount); + } + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + try { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } catch (IllegalArgumentException e) { + registerException(e); + builder.appendNull(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToGeoShape.fromKeyword(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + try { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } catch 
(IllegalArgumentException e) { + registerException(e); + } + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToGeoShape.fromKeyword(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToGeoShapeFromStringEvaluator get(DriverContext context) { + return new ToGeoShapeFromStringEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToGeoShapeFromStringEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java new file mode 100644 index 0000000000000..5e466ddfbfddc --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromCartesianShapeEvaluator.java @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToString}. + * This class is generated. Do not edit it. 
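+ * <p>
+ * As a rough sketch of the per-value behaviour (illustrative only): each WKB-encoded shape is
+ * rendered back into WKT text via {@code ToString#fromCartesianShape}, e.g.
+ * <pre>{@code
+ * BytesRef wkb = ToCartesianShape.fromKeyword(new BytesRef("POINT(7580.93 2272.77)"));
+ * BytesRef wkt = ToString.fromCartesianShape(wkb); // WKB back to WKT text
+ * }</pre>
+ * Unlike the parsing direction, this conversion is not expected to fail on well-formed WKB,
+ * which is why no exception handling is generated below.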
+ */ +public final class ToStringFromCartesianShapeEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToStringFromCartesianShapeEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToStringFromCartesianShape"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromCartesianShape(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromCartesianShape(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToStringFromCartesianShapeEvaluator get(DriverContext context) { + return new ToStringFromCartesianShapeEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToStringFromCartesianShapeEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java new file mode 100644 index 0000000000000..df8e86e58fa69 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromGeoShapeEvaluator.java @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.Vector; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link ToString}. + * This class is generated. Do not edit it. + */ +public final class ToStringFromGeoShapeEvaluator extends AbstractConvertFunction.AbstractEvaluator { + public ToStringFromGeoShapeEvaluator(EvalOperator.ExpressionEvaluator field, Source source, + DriverContext driverContext) { + super(driverContext, field, source); + } + + @Override + public String name() { + return "ToStringFromGeoShape"; + } + + @Override + public Block evalVector(Vector v) { + BytesRefVector vector = (BytesRefVector) v; + int positionCount = v.getPositionCount(); + BytesRef scratchPad = new BytesRef(); + if (vector.isConstant()) { + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); + } + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + for (int p = 0; p < positionCount; p++) { + builder.appendBytesRef(evalValue(vector, p, scratchPad)); + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefVector container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromGeoShape(value); + } + + @Override + public Block evalBlock(Block b) { + BytesRefBlock block = (BytesRefBlock) b; + int positionCount = block.getPositionCount(); + try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { + BytesRef scratchPad = new BytesRef(); + for (int p = 0; p < positionCount; p++) { + int valueCount = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + int end = start + valueCount; + boolean positionOpened = false; + boolean valuesAppended = false; + for (int i = start; i < end; i++) { + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; + } + builder.appendBytesRef(value); + valuesAppended = true; + } + if (valuesAppended == false) { + builder.appendNull(); + } else if (positionOpened) { + builder.endPositionEntry(); + } + } + return builder.build(); + } + } + + private static BytesRef evalValue(BytesRefBlock container, int index, BytesRef scratchPad) { + BytesRef value = container.getBytesRef(index, scratchPad); + return ToString.fromGeoShape(value); + } + + public static class Factory implements EvalOperator.ExpressionEvaluator.Factory { + private final Source source; + + private final EvalOperator.ExpressionEvaluator.Factory field; + + public Factory(EvalOperator.ExpressionEvaluator.Factory field, Source source) { + this.field = field; + this.source = source; + } + + @Override + public ToStringFromGeoShapeEvaluator get(DriverContext context) { + return new 
ToStringFromGeoShapeEvaluator(field.get(context), source, context); + } + + @Override + public String toString() { + return "ToStringFromGeoShapeEvaluator[field=" + field + "]"; + } + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java index 43a16872fd99a..79ce1754f7163 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ColumnInfo.java @@ -162,14 +162,14 @@ protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Pa return builder.value(UTC_DATE_TIME_FORMATTER.formatMillis(longVal)); } }; - case "geo_point" -> new PositionToXContent(block) { + case "geo_point", "geo_shape" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { return builder.value(GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(valueIndex, scratch))); } }; - case "cartesian_point" -> new PositionToXContent(block) { + case "cartesian_point", "cartesian_shape" -> new PositionToXContent(block) { @Override protected XContentBuilder valueToXContent(XContentBuilder builder, ToXContent.Params params, int valueIndex) throws IOException { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java index 40bc90d8c5b0c..d5dc12357f3fe 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -101,8 +101,8 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef } case "boolean" -> ((BooleanBlock) block).getBoolean(offset); case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); - case "geo_point" -> GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); - case "cartesian_point" -> CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "geo_point", "geo_shape" -> GEO.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); + case "cartesian_point", "cartesian_shape" -> CARTESIAN.wkbToWkt(((BytesRefBlock) block).getBytesRef(offset, scratch)); case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; case "_source" -> { BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); @@ -161,12 +161,12 @@ static Page valuesToPage(BlockFactory blockFactory, List columns, Li throw new UncheckedIOException(e); } } - case "geo_point" -> { + case "geo_point", "geo_shape" -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here BytesRef wkb = GEO.wktToWkb(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); } - case "cartesian_point" -> { + case "cartesian_point", "cartesian_shape" -> { // This just converts WKT to WKB, so does not need CRS knowledge, we could merge GEO and CARTESIAN here BytesRef wkb = CARTESIAN.wktToWkb(value.toString()); ((BytesRefBlock.Builder) builder).appendBytesRef(wkb); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index d38dd57ff6aa8..b830e7a77f06a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -279,7 +279,9 @@ public static Failure validateBinaryComparison(BinaryComparison bc) { allowed.add(DataTypes.DATETIME); allowed.add(DataTypes.VERSION); allowed.add(EsqlDataTypes.GEO_POINT); + allowed.add(EsqlDataTypes.GEO_SHAPE); allowed.add(EsqlDataTypes.CARTESIAN_POINT); + allowed.add(EsqlDataTypes.CARTESIAN_SHAPE); if (bc instanceof Equals || bc instanceof NotEquals) { allowed.add(DataTypes.BOOLEAN); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java index 7f5a6079cc6d7..85b30032c1070 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java @@ -163,10 +163,7 @@ public final ExpressionEvaluator.Factory map(BinaryComparison bc, Layout layout) if (leftType == DataTypes.DATETIME) { return longs.apply(bc.source(), leftEval, rightEval); } - if (leftType == EsqlDataTypes.GEO_POINT) { - return geometries.apply(bc.source(), leftEval, rightEval, leftType); - } - if (leftType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(leftType)) { return geometries.apply(bc.source(), leftEval, rightEval, leftType); } throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index f8d9bfbc160a8..0264d2b42eb35 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -21,10 +21,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -177,10 +179,12 @@ private FunctionDefinition[][] functions() { new FunctionDefinition[] { def(ToBoolean.class, ToBoolean::new, "to_boolean", "to_bool"), 
             def(ToCartesianPoint.class, ToCartesianPoint::new, "to_cartesianpoint"),
+            def(ToCartesianShape.class, ToCartesianShape::new, "to_cartesianshape"),
             def(ToDatetime.class, ToDatetime::new, "to_datetime", "to_dt"),
             def(ToDegrees.class, ToDegrees::new, "to_degrees"),
             def(ToDouble.class, ToDouble::new, "to_double", "to_dbl"),
             def(ToGeoPoint.class, ToGeoPoint::new, "to_geopoint"),
+            def(ToGeoShape.class, ToGeoShape::new, "to_geoshape"),
             def(ToIP.class, ToIP::new, "to_ip"),
             def(ToInteger.class, ToInteger::new, "to_integer", "to_int"),
             def(ToLong.class, ToLong::new, "to_long"),
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java
new file mode 100644
index 0000000000000..64db9c6f015ed
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShape.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.ann.ConvertEvaluator;
+import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
+import org.elasticsearch.xpack.esql.expression.function.Param;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE;
+import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD;
+import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT;
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
+
+public class ToCartesianShape extends AbstractConvertFunction {
+
+    private static final Map EVALUATORS = Map.ofEntries(
+        Map.entry(CARTESIAN_SHAPE, (fieldEval, source) -> fieldEval),
+        Map.entry(KEYWORD, ToCartesianShapeFromStringEvaluator.Factory::new),
+        Map.entry(TEXT, ToCartesianShapeFromStringEvaluator.Factory::new)
+    );
+
+    @FunctionInfo(returnType = "cartesian_shape", description = "Converts an input value to a cartesian_shape value.")
+    public ToCartesianShape(Source source, @Param(name = "v", type = { "cartesian_shape", "keyword", "text" }) Expression field) {
+        super(source, field);
+    }
+
+    @Override
+    protected Map factories() {
+        return EVALUATORS;
+    }
+
+    @Override
+    public DataType dataType() {
+        return CARTESIAN_SHAPE;
+    }
+
+    @Override
+    public Expression replaceChildren(List newChildren) {
+        return new ToCartesianShape(source(), newChildren.get(0));
+    }
+
+    @Override
+    protected NodeInfo info() {
+        return NodeInfo.create(this, ToCartesianShape::new, field());
+    }
+
+    @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class })
+    static BytesRef fromKeyword(BytesRef in) {
+        return CARTESIAN.wktToWkb(in.utf8ToString());
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java new file mode 100644 index 0000000000000..075c5e753d76f --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShape.java @@ -0,0 +1,64 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.expression.function.scalar.convert; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; +import static org.elasticsearch.xpack.ql.type.DataTypes.KEYWORD; +import static org.elasticsearch.xpack.ql.type.DataTypes.TEXT; +import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; + +public class ToGeoShape extends AbstractConvertFunction { + + private static final Map EVALUATORS = Map.ofEntries( + Map.entry(GEO_SHAPE, (fieldEval, source) -> fieldEval), + Map.entry(KEYWORD, ToGeoShapeFromStringEvaluator.Factory::new), + Map.entry(TEXT, ToGeoShapeFromStringEvaluator.Factory::new) + ); + + @FunctionInfo(returnType = "geo_shape", description = "Converts an input value to a geo_shape value.") + public ToGeoShape(Source source, @Param(name = "v", type = { "geo_shape", "keyword", "text" }) Expression field) { + super(source, field); + } + + @Override + protected Map factories() { + return EVALUATORS; + } + + @Override + public DataType dataType() { + return GEO_SHAPE; + } + + @Override + public Expression replaceChildren(List newChildren) { + return new ToGeoShape(source(), newChildren.get(0)); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, ToGeoShape::new, field()); + } + + @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) + static BytesRef fromKeyword(BytesRef in) { + return GEO.wktToWkb(in.utf8ToString()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java index e157f508f9466..688996dd1db00 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToString.java @@ -23,7 +23,9 @@ import java.util.Map; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.CARTESIAN_SHAPE; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_POINT; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.GEO_SHAPE; import static org.elasticsearch.xpack.ql.type.DataTypes.BOOLEAN; import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import 
static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE;
@@ -53,7 +55,9 @@ public class ToString extends AbstractConvertFunction implements EvaluatorMapper
         Map.entry(VERSION, ToStringFromVersionEvaluator.Factory::new),
         Map.entry(UNSIGNED_LONG, ToStringFromUnsignedLongEvaluator.Factory::new),
         Map.entry(GEO_POINT, ToStringFromGeoPointEvaluator.Factory::new),
-        Map.entry(CARTESIAN_POINT, ToStringFromCartesianPointEvaluator.Factory::new)
+        Map.entry(CARTESIAN_POINT, ToStringFromCartesianPointEvaluator.Factory::new),
+        Map.entry(CARTESIAN_SHAPE, ToStringFromCartesianShapeEvaluator.Factory::new),
+        Map.entry(GEO_SHAPE, ToStringFromGeoShapeEvaluator.Factory::new)
     );
 
     @FunctionInfo(returnType = "keyword", description = "Converts a field into a string.")
@@ -64,9 +68,11 @@ public ToString(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
@@ -148,4 +154,14 @@ static BytesRef fromGeoPoint(BytesRef wkb) {
     static BytesRef fromCartesianPoint(BytesRef wkb) {
         return new BytesRef(CARTESIAN.wkbToWkt(wkb));
     }
+
+    @ConvertEvaluator(extraName = "FromCartesianShape")
+    static BytesRef fromCartesianShape(BytesRef wkb) {
+        return new BytesRef(CARTESIAN.wkbToWkt(wkb));
+    }
+
+    @ConvertEvaluator(extraName = "FromGeoShape")
+    static BytesRef fromGeoShape(BytesRef wkb) {
+        return new BytesRef(GEO.wkbToWkt(wkb));
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
index 29350203a966d..4fa89e66982e4 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java
@@ -39,9 +39,11 @@ public MvCount(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
index 2bc8314959995..0f6bd847d68ed 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirst.java
@@ -37,9 +37,11 @@ public class MvFirst extends AbstractMultivalueFunction {
     returnType = {
         "boolean",
         "cartesian_point",
+        "cartesian_shape",
         "date",
         "double",
         "geo_point",
+        "geo_shape",
         "integer",
         "ip",
         "keyword",
@@ -56,9 +58,11 @@ public MvFirst(
             type = {
                 "boolean",
                 "cartesian_point",
+                "cartesian_shape",
                 "date",
                 "double",
                 "geo_point",
+                "geo_shape",
                 "integer",
                 "ip",
                 "keyword",
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
index aad003a649cca..2881854d17f6f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLast.java
@@ -37,9 +37,11 @@ public class MvLast
extends AbstractMultivalueFunction { returnType = { "boolean", "cartesian_point", + "cartesian_shape", "date", "double", "geo_point", + "geo_shape", "integer", "ip", "keyword", @@ -56,9 +58,11 @@ public MvLast( type = { "boolean", "cartesian_point", + "cartesian_shape", "date", "double", "geo_point", + "geo_shape", "integer", "ip", "keyword", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 2b73d0bf9f7b1..f7f7ecd0118dd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -43,10 +43,12 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Least; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToBoolean; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToCartesianShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDatetime; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDegrees; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDouble; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoPoint; +import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToGeoShape; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToIP; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToInteger; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToLong; @@ -335,6 +337,8 @@ public static List namedTypeEntries() { of(ESQL_UNARY_SCLR_CLS, ToDatetime.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToDegrees.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToDouble.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, ToGeoShape.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), + of(ESQL_UNARY_SCLR_CLS, ToCartesianShape.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToGeoPoint.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToIP.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), of(ESQL_UNARY_SCLR_CLS, ToInteger.class, PlanNamedTypes::writeESQLUnaryScalar, PlanNamedTypes::readESQLUnaryScalar), @@ -1225,6 +1229,8 @@ static void writeBinaryLogic(PlanStreamOutput out, BinaryLogic binaryLogic) thro entry(name(ToDatetime.class), ToDatetime::new), entry(name(ToDegrees.class), ToDegrees::new), entry(name(ToDouble.class), ToDouble::new), + entry(name(ToGeoShape.class), ToGeoShape::new), + entry(name(ToCartesianShape.class), ToCartesianShape::new), entry(name(ToGeoPoint.class), ToGeoPoint::new), entry(name(ToIP.class), ToIP::new), entry(name(ToInteger.class), ToInteger::new), diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index 992c922693edd..8c9ab8afe41f9 100644 --- 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -344,7 +344,7 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte case "version" -> TopNEncoder.VERSION; case "boolean", "null", "byte", "short", "integer", "long", "double", "float", "half_float", "datetime", "date_period", "time_duration", "object", "nested", "scaled_float", "unsigned_long", "_doc" -> TopNEncoder.DEFAULT_SORTABLE; - case "geo_point", "cartesian_point" -> TopNEncoder.DEFAULT_UNSORTABLE; + case "geo_point", "cartesian_point", "geo_shape", "cartesian_shape" -> TopNEncoder.DEFAULT_UNSORTABLE; // unsupported fields are encoded as BytesRef, we'll use the same encoder; all values should be null at this point case "unsupported" -> TopNEncoder.UNSUPPORTED; default -> throw new EsqlIllegalArgumentException("No TopN sorting encoder for type " + inverse.get(channel).type()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index 1c20e55f289c3..933b0174aebc0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -197,11 +197,10 @@ static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName, Predicate< /** * Map QL's {@link DataType} to the compute engine's {@link ElementType}, for sortable types only. - * This specifically excludes GEO_POINT and CARTESIAN_POINT, which are backed by DataType.LONG - * but are not themselves sortable (the long can be sorted, but the sort order is not usually useful). + * This specifically excludes spatial data types, which are not themselves sortable. 
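+     * For example, {@code toSortableElementType(EsqlDataTypes.GEO_SHAPE)} yields
+     * {@code ElementType.UNKNOWN}, while non-spatial types fall through to
+     * {@link #toElementType(DataType)} below.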
*/ public static ElementType toSortableElementType(DataType dataType) { - if (dataType == EsqlDataTypes.GEO_POINT || dataType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(dataType)) { return ElementType.UNKNOWN; } return toElementType(dataType); @@ -238,11 +237,7 @@ public static ElementType toElementType(DataType dataType) { if (dataType == EsQueryExec.DOC_DATA_TYPE) { return ElementType.DOC; } - // TODO: Spatial types can be read from source into BYTES_REF, or read from doc-values into LONG - if (dataType == EsqlDataTypes.GEO_POINT) { - return ElementType.BYTES_REF; - } - if (dataType == EsqlDataTypes.CARTESIAN_POINT) { + if (EsqlDataTypes.isSpatial(dataType)) { return ElementType.BYTES_REF; } throw EsqlIllegalArgumentException.illegalDataType(dataType); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index eae808abb5037..e8cc5a77291bb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -47,6 +47,8 @@ public final class EsqlDataTypes { public static final DataType TIME_DURATION = new DataType("TIME_DURATION", null, Integer.BYTES + Long.BYTES, false, false, false); public static final DataType GEO_POINT = new DataType("geo_point", Double.BYTES * 2, false, false, false); public static final DataType CARTESIAN_POINT = new DataType("cartesian_point", Double.BYTES * 2, false, false, false); + public static final DataType GEO_SHAPE = new DataType("geo_shape", Integer.MAX_VALUE, false, false, false); + public static final DataType CARTESIAN_SHAPE = new DataType("cartesian_shape", Integer.MAX_VALUE, false, false, false); private static final Collection TYPES = Stream.of( BOOLEAN, @@ -72,7 +74,9 @@ public final class EsqlDataTypes { VERSION, UNSIGNED_LONG, GEO_POINT, - CARTESIAN_POINT + CARTESIAN_POINT, + CARTESIAN_SHAPE, + GEO_SHAPE ).sorted(Comparator.comparing(DataType::typeName)).toList(); private static final Map NAME_TO_TYPE = TYPES.stream().collect(toUnmodifiableMap(DataType::typeName, t -> t)); @@ -83,6 +87,7 @@ public final class EsqlDataTypes { Map map = TYPES.stream().filter(e -> e.esType() != null).collect(toMap(DataType::esType, t -> t)); // ES calls this 'point', but ESQL calls it 'cartesian_point' map.put("point", CARTESIAN_POINT); + map.put("shape", CARTESIAN_SHAPE); ES_TO_TYPE = Collections.unmodifiableMap(map); } @@ -167,7 +172,7 @@ public static boolean isNullOrTimeDuration(DataType t) { } public static boolean isSpatial(DataType t) { - return t == GEO_POINT || t == CARTESIAN_POINT; + return t == GEO_POINT || t == CARTESIAN_POINT || t == GEO_SHAPE || t == CARTESIAN_SHAPE; } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 24e356520ff3d..c3b72fa3f2d0a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -152,6 +152,12 @@ private Page randomPage(List columns) { case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(randomIdentifier()).toBytesRef()); case "geo_point" -> ((BytesRefBlock.Builder) 
builder).appendBytesRef(GEO.asWkb(GeometryTestUtils.randomPoint())); case "cartesian_point" -> ((BytesRefBlock.Builder) builder).appendBytesRef(CARTESIAN.asWkb(ShapeTestUtils.randomPoint())); + case "geo_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + GEO.asWkb(GeometryTestUtils.randomGeometry(randomBoolean())) + ); + case "cartesian_shape" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(randomBoolean())) + ); case "null" -> builder.appendNull(); case "_source" -> { try { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index a01e5bc5f55d2..56ac25a3561af 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -1254,78 +1254,78 @@ public void testEmptyEsRelationOnCountStar() throws IOException { } public void testUnsupportedFieldsInStats() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | stats max(shape) + | stats max(unsupported) """, errorMsg); verifyUnsupported(""" from test - | stats max(int) by shape + | stats max(int) by unsupported """, errorMsg); verifyUnsupported(""" from test - | stats max(int) by bool, shape + | stats max(int) by bool, unsupported """, errorMsg); } public void testUnsupportedFieldsInEval() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | eval x = shape + | eval x = unsupported """, errorMsg); verifyUnsupported(""" from test - | eval foo = 1, x = shape + | eval foo = 1, x = unsupported """, errorMsg); verifyUnsupported(""" from test - | eval x = 1 + shape + | eval x = 1 + unsupported """, errorMsg); } public void testUnsupportedFieldsInWhere() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | where shape == "[1.0, 1.0]" + | where unsupported == "[1.0, 1.0]" """, errorMsg); verifyUnsupported(""" from test - | where int > 2 and shape == "[1.0, 1.0]" + | where int > 2 and unsupported == "[1.0, 1.0]" """, errorMsg); } public void testUnsupportedFieldsInSort() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | sort shape + | sort unsupported """, errorMsg); verifyUnsupported(""" from test - | sort int, shape + | sort int, unsupported """, errorMsg); } public void testUnsupportedFieldsInDissect() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | dissect shape \"%{foo}\" + | dissect unsupported \"%{foo}\" """, errorMsg); } public void testUnsupportedFieldsInGrok() { - var errorMsg = "Cannot use field [shape] with unsupported type [geo_shape]"; + var errorMsg = "Cannot use field [unsupported] with unsupported type [ip_range]"; verifyUnsupported(""" from test - | grok shape 
\"%{WORD:foo}\" + | grok unsupported \"%{WORD:foo}\" """, errorMsg); } @@ -1349,7 +1349,8 @@ public void testRegexOnInt() { public void testUnsupportedTypesWithToString() { // DATE_PERIOD and TIME_DURATION types have been added, but not really patched through the engine; i.e. supported. - final String supportedTypes = "boolean or cartesian_point or datetime or geo_point or ip or numeric or string or version"; + final String supportedTypes = + "boolean or cartesian_point or cartesian_shape or datetime or geo_point or geo_shape or ip or numeric or string or version"; verifyUnsupported( "row period = 1 year | eval to_string(period)", "line 1:28: argument of [to_string(period)] must be [" + supportedTypes + "], found value [period] type [date_period]" @@ -1358,7 +1359,10 @@ public void testUnsupportedTypesWithToString() { "row duration = 1 hour | eval to_string(duration)", "line 1:30: argument of [to_string(duration)] must be [" + supportedTypes + "], found value [duration] type [time_duration]" ); - verifyUnsupported("from test | eval to_string(shape)", "line 1:28: Cannot use field [shape] with unsupported type [geo_shape]"); + verifyUnsupported( + "from test | eval to_string(unsupported)", + "line 1:28: Cannot use field [unsupported] with unsupported type [ip_range]" + ); } public void testNonExistingEnrichPolicy() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index e3ff92000ab21..ff34823aa6d88 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -130,6 +130,8 @@ public static Literal randomLiteral(DataType type) { case "version" -> randomVersion().toBytesRef(); case "geo_point" -> GEO.asWkb(GeometryTestUtils.randomPoint()); case "cartesian_point" -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint()); + case "geo_shape" -> GEO.asWkb(GeometryTestUtils.randomGeometry(randomBoolean())); + case "cartesian_shape" -> CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(randomBoolean())); case "null" -> null; case "_source" -> { try { @@ -909,7 +911,9 @@ private static String typeErrorMessage(boolean includeOrdinal, List expectedValue.apply((BytesRef) n), warnings); } + /** + * Generate positive test cases for a unary function operating on an {@link EsqlDataTypes#GEO_SHAPE}. + */ + public static void forUnaryGeoShape( + List suppliers, + String expectedEvaluatorToString, + DataType expectedType, + Function expectedValue, + List warnings + ) { + unary(suppliers, expectedEvaluatorToString, geoShapeCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings); + } + + /** + * Generate positive test cases for a unary function operating on an {@link EsqlDataTypes#CARTESIAN_SHAPE}. + */ + public static void forUnaryCartesianShape( + List suppliers, + String expectedEvaluatorToString, + DataType expectedType, + Function expectedValue, + List warnings + ) { + unary(suppliers, expectedEvaluatorToString, cartesianShapeCases(), expectedType, n -> expectedValue.apply((BytesRef) n), warnings); + } + /** * Generate positive test cases for a unary function operating on an {@link DataTypes#IP}. 
     /**
      * Generate positive test cases for a unary function operating on an {@link DataTypes#IP}.
      */
@@ -922,6 +948,26 @@ private static List cartesianPointCases() {
         );
     }
 
+    private static List geoShapeCases() {
+        return List.of(
+            new TypedDataSupplier(
+                "",
+                () -> GEO.asWkb(GeometryTestUtils.randomGeometry(ESTestCase.randomBoolean())),
+                EsqlDataTypes.GEO_SHAPE
+            )
+        );
+    }
+
+    private static List cartesianShapeCases() {
+        return List.of(
+            new TypedDataSupplier(
+                "",
+                () -> CARTESIAN.asWkb(ShapeTestUtils.randomGeometry(ESTestCase.randomBoolean())),
+                EsqlDataTypes.CARTESIAN_SHAPE
+            )
+        );
+    }
+
     public static List ipCases() {
         return List.of(
             new TypedDataSupplier(
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java
new file mode 100644
index 0000000000000..961aaacab0423
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.geo.GeometryTestUtils;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
+
+public class ToCartesianShapeTests extends AbstractFunctionTestCase {
+    public ToCartesianShapeTests(@Name("TestCase") Supplier testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable parameters() {
+        // TODO multivalue fields
+        final String attribute = "Attribute[channel=0]";
+        final Function evaluatorName = s -> "ToCartesianShape" + s + "Evaluator[field=" + attribute + "]";
+        final List suppliers = new ArrayList<>();
+
+        TestCaseSupplier.forUnaryCartesianShape(suppliers, attribute, EsqlDataTypes.CARTESIAN_SHAPE, v -> v, List.of());
+        // random strings that don't look like a cartesian shape
+        TestCaseSupplier.forUnaryStrings(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            EsqlDataTypes.CARTESIAN_SHAPE,
+            bytesRef -> null,
+            bytesRef -> {
+                var exception = expectThrows(Exception.class, () -> CARTESIAN.wktToWkb(bytesRef.utf8ToString()));
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
+                    "Line -1:-1: " + exception
+                );
+            }
+        );
+        // strings that are cartesian_shape representations
+        TestCaseSupplier.unary(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            List.of(
+                new TestCaseSupplier.TypedDataSupplier(
+                    "",
+                    () -> new BytesRef(CARTESIAN.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))),
+                    DataTypes.KEYWORD
+                )
+            ),
+            EsqlDataTypes.CARTESIAN_SHAPE,
+            bytesRef -> CARTESIAN.wktToWkb(((BytesRef) bytesRef).utf8ToString()),
+            List.of()
+        );
+
+        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+    }
+
+    @Override
+    protected Expression build(Source source, List args) {
+        return new ToCartesianShape(source, args.get(0));
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java
new file mode 100644
index 0000000000000..dd9fcbd4951d7
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.geo.GeometryTestUtils;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;
+
+public class ToGeoShapeTests extends AbstractFunctionTestCase {
+    public ToGeoShapeTests(@Name("TestCase") Supplier testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable parameters() {
+        // TODO multivalue fields
+        final String attribute = "Attribute[channel=0]";
+        final Function evaluatorName = s -> "ToGeoShape" + s + "Evaluator[field=" + attribute + "]";
+        final List suppliers = new ArrayList<>();
+
+        TestCaseSupplier.forUnaryGeoShape(suppliers, attribute, EsqlDataTypes.GEO_SHAPE, v -> v, List.of());
+        // random strings that don't look like a geo shape
+        TestCaseSupplier.forUnaryStrings(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            EsqlDataTypes.GEO_SHAPE,
+            bytesRef -> null,
+            bytesRef -> {
+                var exception = expectThrows(Exception.class, () -> GEO.wktToWkb(bytesRef.utf8ToString()));
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null.
Only first 20 failures recorded.", + "Line -1:-1: " + exception + ); + } + ); + // strings that are geo_shape representations + TestCaseSupplier.unary( + suppliers, + evaluatorName.apply("FromString"), + List.of( + new TestCaseSupplier.TypedDataSupplier( + "", + () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean()))), + DataTypes.KEYWORD + ) + ), + EsqlDataTypes.GEO_SHAPE, + bytesRef -> GEO.wktToWkb(((BytesRef) bytesRef).utf8ToString()), + List.of() + ); + + return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + } + + @Override + protected Expression build(Source source, List args) { + return new ToGeoShape(source, args.get(0)); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 918956de08648..9d5eed2ca2ebe 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -101,6 +101,20 @@ public static Iterable parameters() { wkb -> new BytesRef(CARTESIAN.wkbToWkt(wkb)), List.of() ); + TestCaseSupplier.forUnaryGeoShape( + suppliers, + "ToStringFromGeoShapeEvaluator[field=" + read + "]", + DataTypes.KEYWORD, + wkb -> new BytesRef(GEO.wkbToWkt(wkb)), + List.of() + ); + TestCaseSupplier.forUnaryCartesianShape( + suppliers, + "ToStringFromCartesianShapeEvaluator[field=" + read + "]", + DataTypes.KEYWORD, + wkb -> new BytesRef(CARTESIAN.wkbToWkt(wkb)), + List.of() + ); TestCaseSupplier.forUnaryIp( suppliers, "ToStringFromIPEvaluator[field=" + read + "]", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java index d2e7e924fb95c..ecedb00e65597 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java @@ -11,7 +11,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; -import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Geometry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; @@ -415,7 +415,7 @@ protected static void geoPoints( DataType expectedDataType, BiFunction, Matcher> matcher ) { - points(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, GeometryTestUtils::randomPoint, matcher); + spatial(cases, name, evaluatorName, EsqlDataTypes.GEO_POINT, expectedDataType, GEO, GeometryTestUtils::randomPoint, matcher); } /** @@ -443,7 +443,7 @@ protected static void cartesianPoints( DataType expectedDataType, BiFunction, Matcher> matcher ) { - points( + spatial( cases, name, evaluatorName, @@ -456,20 +456,68 @@ protected static void cartesianPoints( } /** - * Build many 
test cases with either {@code geo_point} or {@code cartesian_point} values. + * Build many test cases with {@code geo_shape} values that are converted to another type. + * This assumes that the function consumes {@code geo_shape} values and produces another type. + * For example, mv_count() can consume geo_shapes and produce an integer count. */ - protected static void points( + protected static void geoShape( + List cases, + String name, + String evaluatorName, + DataType expectedDataType, + BiFunction, Matcher> matcher + ) { + spatial( + cases, + name, + evaluatorName, + EsqlDataTypes.GEO_SHAPE, + expectedDataType, + GEO, + () -> GeometryTestUtils.randomGeometry(randomBoolean()), + matcher + ); + } + + /** + * Build many test cases with {@code cartesian_shape} values that are converted to another type. + * This assumes that the function consumes {@code cartesian_shape} values and produces another type. + * For example, mv_count() can consume cartesian shapes and produce an integer count. + */ + protected static void cartesianShape( + List cases, + String name, + String evaluatorName, + DataType expectedDataType, + BiFunction, Matcher> matcher + ) { + spatial( + cases, + name, + evaluatorName, + EsqlDataTypes.CARTESIAN_SHAPE, + expectedDataType, + CARTESIAN, + () -> ShapeTestUtils.randomGeometry(randomBoolean()), + matcher + ); + } + + /** + * Build many test cases for spatial values + */ + protected static void spatial( List cases, String name, String evaluatorName, DataType dataType, DataType expectedDataType, SpatialCoordinateTypes spatial, - Supplier randomPoint, + Supplier randomGeometry, BiFunction, Matcher> matcher ) { cases.add(new TestCaseSupplier(name + "(" + dataType.typeName() + ")", List.of(dataType), () -> { - BytesRef wkb = spatial.asWkb(randomPoint.get()); + BytesRef wkb = spatial.asWkb(randomGeometry.get()); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(List.of(wkb), dataType, "field")), evaluatorName + "[field=Attribute[channel=0]]", @@ -479,7 +527,7 @@ protected static void points( })); for (Block.MvOrdering ordering : Block.MvOrdering.values()) { cases.add(new TestCaseSupplier(name + "(<" + dataType.typeName() + "s>) " + ordering, List.of(dataType), () -> { - List mvData = randomList(1, 100, () -> spatial.asWkb(randomPoint.get())); + List mvData = randomList(1, 100, () -> spatial.asWkb(randomGeometry.get())); putInOrder(mvData, ordering); return new TestCaseSupplier.TestCase( List.of(new TestCaseSupplier.TypedData(mvData, dataType, "field")), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java index 1abbd62faa0bd..342baf405d0c3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java @@ -39,6 +39,8 @@ public static Iterable parameters() { dateTimes(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); geoPoints(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); cartesianPoints(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); + geoShape(cases, "mv_count", "MvCount", 
DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); + cartesianShape(cases, "mv_count", "MvCount", DataTypes.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java index 91c30b7c1f566..0f52efe20399e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java @@ -41,6 +41,8 @@ public static Iterable parameters() { dateTimes(cases, "mv_first", "MvFirst", DataTypes.DATETIME, (size, values) -> equalTo(values.findFirst().getAsLong())); geoPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.GEO_POINT, (size, values) -> equalTo(values.findFirst().get())); cartesianPoints(cases, "mv_first", "MvFirst", EsqlDataTypes.CARTESIAN_POINT, (size, values) -> equalTo(values.findFirst().get())); + geoShape(cases, "mv_first", "MvFirst", EsqlDataTypes.GEO_SHAPE, (size, values) -> equalTo(values.findFirst().get())); + cartesianShape(cases, "mv_first", "MvFirst", EsqlDataTypes.CARTESIAN_SHAPE, (size, values) -> equalTo(values.findFirst().get())); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java index 7577cbf7dd0a8..41abab22c72ef 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java @@ -47,6 +47,14 @@ public static Iterable parameters() { EsqlDataTypes.CARTESIAN_POINT, (size, values) -> equalTo(values.reduce((f, s) -> s).get()) ); + geoShape(cases, "mv_last", "MvLast", EsqlDataTypes.GEO_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); + cartesianShape( + cases, + "mv_last", + "MvLast", + EsqlDataTypes.CARTESIAN_SHAPE, + (size, values) -> equalTo(values.reduce((f, s) -> s).get()) + ); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java index 37ab820146bf4..71aa945594584 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java @@ -94,8 +94,8 @@ protected final void validateType(BinaryOperator op, DataType lhsTyp equalTo( String.format( Locale.ROOT, - "first argument of [%s %s] must be [numeric, keyword, text, ip, datetime, version, geo_point or " 
- + "cartesian_point], found value [] type [%s]", + "first argument of [%s %s] must be [numeric, keyword, text, ip, datetime, version, geo_point, " + + "geo_shape, cartesian_point or cartesian_shape], found value [] type [%s]", lhsType.typeName(), rhsType.typeName(), lhsType.typeName() diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml index 06fc2c8a3fa99..8b28776e42fcd 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml @@ -142,7 +142,7 @@ unsupported: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -170,7 +170,7 @@ unsupported: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } - - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -191,7 +191,7 @@ unsupported: - match: { values.0.7: null } - match: { values.0.8: "POINT (10.0 12.0)" } - match: { values.0.9: "POINT (10.0 12.0)" } - - match: { values.0.10: null } + - match: { values.0.10: "LINESTRING (-97.154 25.996, -97.159 25.998, -97.181 25.991, -97.187 25.985)" } - match: { values.0.11: null } - match: { values.0.12: null } - match: { values.0.13: null } @@ -205,7 +205,7 @@ unsupported: - match: { values.0.21: null } - match: { values.0.22: null } - match: { values.0.23: null } - - match: { values.0.24: null } + - match: { values.0.24: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)" } - match: { values.0.25: 12 } - match: { values.0.26: xy } - match: { values.0.27: "foo bar" } @@ -238,7 +238,7 @@ unsupported: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -266,7 +266,7 @@ unsupported: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } - - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -282,8 +282,8 @@ unsupported: - do: esql.query: body: - query: 'from test | keep shape | limit 0' - - match: { columns.0.name: shape } + query: 'from test | keep histogram | limit 0' + - match: { columns.0.name: histogram } - match: { columns.0.type: unsupported } - length: { values: 0 } @@ -322,7 +322,7 @@ unsupported with sort: - match: { columns.9.name: geo_point_alias } - match: { columns.9.type: geo_point } - match: { columns.10.name: geo_shape } - - match: { columns.10.type: unsupported } + - match: { columns.10.type: geo_shape } - match: { columns.11.name: histogram } - match: { 
columns.11.type: unsupported } - match: { columns.12.name: integer_range } @@ -350,7 +350,7 @@ unsupported with sort: - match: { columns.23.name: search_as_you_type._index_prefix } - match: { columns.23.type: unsupported } - match: { columns.24.name: shape } - - match: { columns.24.type: unsupported } + - match: { columns.24.type: cartesian_shape } - match: { columns.25.name: some_doc.bar } - match: { columns.25.type: long } - match: { columns.26.name: some_doc.foo } @@ -371,7 +371,7 @@ unsupported with sort: - match: { values.0.7: null } - match: { values.0.8: "POINT (10.0 12.0)" } - match: { values.0.9: "POINT (10.0 12.0)" } - - match: { values.0.10: null } + - match: { values.0.10: "LINESTRING (-97.154 25.996, -97.159 25.998, -97.181 25.991, -97.187 25.985)" } - match: { values.0.11: null } - match: { values.0.12: null } - match: { values.0.13: null } @@ -385,7 +385,7 @@ unsupported with sort: - match: { values.0.21: null } - match: { values.0.22: null } - match: { values.0.23: null } - - match: { values.0.24: null } + - match: { values.0.24: "LINESTRING (-377.03653 389.897676, -377.009051 389.889939)" } - match: { values.0.25: 12 } - match: { values.0.26: xy } - match: { values.0.27: "foo bar" } From 85e1ee404f6f79f78a4c9558d2d875384fb7eff9 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 17 Jan 2024 15:01:49 +0100 Subject: [PATCH 76/95] Use LogDocMergePolicy in VariableWidthHistogramAggregatorTests (#104392) --- .../histogram/VariableWidthHistogramAggregatorTests.java | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java index bbeeb855f8d18..99be8590e06f2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregatorTests.java @@ -11,10 +11,14 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LogDocMergePolicy; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.analysis.MockAnalyzer; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; @@ -603,7 +607,10 @@ private void testSearchCase( final Consumer verify ) throws IOException { try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + IndexWriterConfig config = LuceneTestCase.newIndexWriterConfig(random(), new MockAnalyzer(random())); + // Use LogDocMergePolicy to avoid randomization issues with the doc retrieval order. 
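+ // (Background: a randomized merge policy may merge non-adjacent segments, which
+ // reorders global doc IDs; LogDocMergePolicy only merges adjacent segments, so
+ // documents keep their insertion order across merges.)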
+ config.setMergePolicy(new LogDocMergePolicy());
+ try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config)) {
 indexSampleData(dataset, indexWriter, multipleSegments);
 }

From 919d282aa6a20156c8520b01191fdf311523c302 Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Wed, 17 Jan 2024 09:06:19 -0500
Subject: [PATCH 77/95] ESQL: Add option to drop null fields (#102428)

This adds an option to drop columns that are entirely null from the results.
Is this something we want?
---
 docs/changelog/102428.yaml | 5 +
 docs/reference/esql/esql-query-api.asciidoc | 23 +-
 .../rest-api-spec/api/esql.async_query.json | 5 +
 .../rest-api-spec/api/esql.query.json | 5 +
 .../data/SingletonOrdinalsBuilder.java | 4 -
 .../compute/data/BlockBuilderTests.java | 19 +-
 .../xpack/esql/action/EsqlQueryResponse.java | 31 ++-
 .../esql/action/ResponseXContentUtils.java | 79 ++++--
 .../esql/action/RestEsqlAsyncQueryAction.java | 4 +-
 .../esql/action/RestEsqlQueryAction.java | 3 +-
 .../esql/action/EsqlQueryResponseTests.java | 74 ++++-
 .../rest-api-spec/test/esql/110_all_null.yml | 263 ++++++++++++++++++
 12 files changed, 451 insertions(+), 64 deletions(-)
 create mode 100644 docs/changelog/102428.yaml
 create mode 100644 x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml

diff --git a/docs/changelog/102428.yaml b/docs/changelog/102428.yaml
new file mode 100644
index 0000000000000..275492fa6a888
--- /dev/null
+++ b/docs/changelog/102428.yaml
@@ -0,0 +1,5 @@
+pr: 102428
+summary: "ESQL: Add option to drop null fields"
+area: ES|QL
+type: enhancement
+issues: []
diff --git a/docs/reference/esql/esql-query-api.asciidoc b/docs/reference/esql/esql-query-api.asciidoc
index bbfa41538528a..e1e27be12a36f 100644
--- a/docs/reference/esql/esql-query-api.asciidoc
+++ b/docs/reference/esql/esql-query-api.asciidoc
@@ -43,6 +43,12 @@ or alias you search.
 (Optional, string) Separator for CSV results. Defaults to `,`. The API only
 supports this parameter for CSV responses.

+`drop_null_columns`::
+(Optional, boolean) Should columns that are entirely `null` be removed from
+the `columns` and `values` portion of the results? Defaults to `false`. If
+`true` the response will include an extra section under the name
+`all_columns` which has the name of all columns.
+
 `format`::
 (Optional, string) Format for the response. For valid values, refer to
 <>.
@@ -75,17 +81,12 @@ responses. See <>.

 `columns`::
 (array of objects)
-Column headings for the search results. Each object is a column.
-+
-.Properties of `columns` objects
-[%collapsible%open]
-=====
-`name`::
-(string) Name of the column.
-
-`type`::
-(string) Data type for the column.
-=====
+Column `name` and `type` for each column returned in `values`. Each object is a single column.
+
+`all_columns`::
+(array of objects)
+Column `name` and `type` for each queried column. Each object is a single column. This is only
+returned if `drop_null_columns` is sent with the request.

 `rows`::
 (array of arrays)
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json
index a2bcf67e8611c..85a2a46c8335d 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query.json
@@ -29,6 +29,11 @@
 "type":"string",
 "description":"The character to use between values within a CSV row. Only valid for the csv format.",
 "default":false
+ },
+ "drop_null_columns": {
+ "type": "boolean",
+ "description": "Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.",
+ "default": false
 }
 },
 "body":{
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json
index 8810746851468..573fde5d9a9cd 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.query.json
@@ -29,6 +29,11 @@
 "type":"string",
 "description":"The character to use between values within a CSV row. Only valid for the csv format.",
 "default":false
+ },
+ "drop_null_columns": {
+ "type": "boolean",
+ "description": "Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.",
+ "default": false
 }
 },
 "body":{
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java
index fb83432ba0565..10e9237ef7071 100644
--- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java
+++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/SingletonOrdinalsBuilder.java
@@ -43,10 +43,6 @@ public SingletonOrdinalsBuilder appendOrd(int value) {
 return this;
 }

- int[] ords() {
- return ords;
- }
-
 @Override
 public SingletonOrdinalsBuilder beginPositionEntry() {
 throw new UnsupportedOperationException("should only have one value per doc");
 }
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderTests.java
index 3c822da7b5586..a48e22e9ccefa 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderTests.java
@@ -46,11 +46,11 @@ public BlockBuilderTests(ElementType elementType) {
 }

 public void testAllNulls() {
- for (int numEntries : List.of(1, randomIntBetween(1, 100))) {
+ for (int numEntries : List.of(1, between(1, 100), between(101, 1000))) {
 testAllNullsImpl(elementType.newBlockBuilder(0, blockFactory), numEntries);
- testAllNullsImpl(elementType.newBlockBuilder(100, blockFactory), numEntries);
- testAllNullsImpl(elementType.newBlockBuilder(1000, blockFactory), numEntries);
- testAllNullsImpl(elementType.newBlockBuilder(randomIntBetween(0, 100), blockFactory), numEntries);
+ testAllNullsImpl(elementType.newBlockBuilder(numEntries, blockFactory), numEntries);
+ testAllNullsImpl(elementType.newBlockBuilder(numEntries * 10, blockFactory), numEntries);
+ testAllNullsImpl(elementType.newBlockBuilder(between(0, numEntries), blockFactory), numEntries);
 }
 }

@@ -60,17 +60,14 @@ private void testAllNullsImpl(Block.Builder builder, int numEntries) {
 }
 try (Block block = builder.build()) {
 assertThat(block.getPositionCount(), is(numEntries));
- assertThat(block.isNull(0), is(true));
- assertThat(block.isNull(numEntries - 1), is(true));
- assertThat(block.isNull(randomPosition(numEntries)), is(true));
+ for (int p = 0; p < numEntries; p++) {
+ assertThat(block.isNull(p), is(true));
+ }
+ assertThat(block.areAllValuesNull(), is(true));
 }
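 // once the block is closed by try-with-resources, nothing should remain
 // allocated on the circuit breaker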
assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); } - static int randomPosition(int positionCount) { - return positionCount == 1 ? 0 : randomIntBetween(0, positionCount - 1); - } - public void testCloseWithoutBuilding() { elementType.newBlockBuilder(10, blockFactory).close(); assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index 63686820574b5..1763e36707958 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -39,6 +39,8 @@ public class EsqlQueryResponse extends ActionResponse implements ChunkedToXConte @SuppressWarnings("this-escape") private final AbstractRefCounted counted = AbstractRefCounted.of(this::closeInternal); + public static final String DROP_NULL_COLUMNS_OPTION = "drop_null_columns"; + private final List columns; private final List pages; private final Profile profile; @@ -160,20 +162,45 @@ private Iterator asyncPropertiesOrEmpty() { @Override public Iterator toXContentChunked(ToXContent.Params params) { - final Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar); + boolean dropNullColumns = params.paramAsBoolean(DROP_NULL_COLUMNS_OPTION, false); + boolean[] nullColumns = dropNullColumns ? nullColumns() : null; + Iterator columnHeadings = dropNullColumns + ? Iterators.concat( + ResponseXContentUtils.allColumns(columns, "all_columns"), + ResponseXContentUtils.nonNullColumns(columns, nullColumns, "columns") + ) + : ResponseXContentUtils.allColumns(columns, "columns"); + Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar, nullColumns); Iterator profileRender = profile == null ? List.of().iterator() : ChunkedToXContentHelper.field("profile", profile, params); return Iterators.concat( ChunkedToXContentHelper.startObject(), asyncPropertiesOrEmpty(), - ResponseXContentUtils.columnHeadings(columns), + columnHeadings, ChunkedToXContentHelper.array("values", valuesIt), profileRender, ChunkedToXContentHelper.endObject() ); } + private boolean[] nullColumns() { + boolean[] nullColumns = new boolean[columns.size()]; + for (int c = 0; c < nullColumns.length; c++) { + nullColumns[c] = allColumnsAreNull(c); + } + return nullColumns; + } + + private boolean allColumnsAreNull(int c) { + for (Page page : pages) { + if (page.getBlock(c).areAllValuesNull() == false) { + return false; + } + } + return true; + } + @Override public boolean isFragment() { return false; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java index e28e6beebabed..ca40faff81c55 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -23,10 +23,12 @@ */ final class ResponseXContentUtils { - /** Returns the column headings for the given columns. */ - static Iterator columnHeadings(List columns) { + /** + * Returns the column headings for the given columns. 
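+ * The section name is a parameter so the same rendering can produce either the
+ * regular {@code columns} heading or the {@code all_columns} heading used when
+ * {@code drop_null_columns} is set.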
+ */ + static Iterator allColumns(List columns, String name) { return ChunkedToXContentHelper.singleChunk((builder, params) -> { - builder.startArray("columns"); + builder.startArray(name); for (ColumnInfo col : columns) { col.toXContent(builder, params); } @@ -34,43 +36,62 @@ static Iterator columnHeadings(List columns) { }); } + /** + * Returns the column headings for the given columns, moving the heading + * for always-null columns to a {@code null_columns} section. + */ + static Iterator nonNullColumns(List columns, boolean[] nullColumns, String name) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + builder.startArray(name); + for (int c = 0; c < columns.size(); c++) { + if (nullColumns[c] == false) { + columns.get(c).toXContent(builder, params); + } + } + return builder.endArray(); + }); + } + /** Returns the column values for the given pages (described by the column infos). */ - static Iterator columnValues(List columns, List pages, boolean columnar) { + static Iterator columnValues( + List columns, + List pages, + boolean columnar, + boolean[] nullColumns + ) { if (pages.isEmpty()) { return Collections.emptyIterator(); } else if (columnar) { - return columnarValues(columns, pages); + return columnarValues(columns, pages, nullColumns); } else { - return rowValues(columns, pages); + return rowValues(columns, pages, nullColumns); } } /** Returns a columnar based representation of the values in the given pages (described by the column infos). */ - static Iterator columnarValues(List columns, List pages) { + static Iterator columnarValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); - return Iterators.flatMap( - Iterators.forRange( - 0, - columns.size(), - column -> Iterators.concat( - Iterators.single(((builder, params) -> builder.startArray())), - Iterators.flatMap(pages.iterator(), page -> { - ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); - return Iterators.forRange( - 0, - page.getPositionCount(), - position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) - ); - }), - ChunkedToXContentHelper.endArray() - ) - ), - Function.identity() - ); + return Iterators.flatMap(Iterators.forRange(0, columns.size(), column -> { + if (nullColumns != null && nullColumns[column]) { + return Collections.emptyIterator(); + } + return Iterators.concat( + Iterators.single(((builder, params) -> builder.startArray())), + Iterators.flatMap(pages.iterator(), page -> { + ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); + return Iterators.forRange( + 0, + page.getPositionCount(), + position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) + ); + }), + ChunkedToXContentHelper.endArray() + ); + }), Function.identity()); } /** Returns a row based representation of the values in the given pages (described by the column infos). */ - static Iterator rowValues(List columns, List pages) { + static Iterator rowValues(List columns, List pages, boolean[] nullColumns) { final BytesRef scratch = new BytesRef(); return Iterators.flatMap(pages.iterator(), page -> { final int columnCount = columns.size(); @@ -82,7 +103,9 @@ static Iterator rowValues(List columns, List

    (builder, params) -> { builder.startArray(); for (int c = 0; c < columnCount; c++) { - toXContents[c].positionToXContent(builder, params, position); + if (nullColumns == null || nullColumns[c] == false) { + toXContents[c].positionToXContent(builder, params, position); + } } return builder.endArray(); }); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java index 3dea461ccf8b7..0b2bad2eb22d3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -18,11 +18,11 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @ServerlessScope(Scope.PUBLIC) @@ -60,6 +60,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override protected Set responseParams() { - return Collections.singleton(URL_PARAM_DELIMITER); + return Set.of(URL_PARAM_DELIMITER, DROP_NULL_COLUMNS_OPTION); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 6b8e7fc397865..070c0e112e051 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -19,7 +19,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Set; @@ -65,6 +64,6 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli @Override protected Set responseParams() { - return Collections.singleton(URL_PARAM_DELIMITER); + return Set.of(URL_PARAM_DELIMITER, EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index c3b72fa3f2d0a..3b64870a15839 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParserConstructor; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -56,10 +57,13 @@ import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Stream; +import static org.elasticsearch.common.xcontent.ChunkedToXContent.wrapAsToXContent; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static 
org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; import static org.elasticsearch.xpack.esql.action.ResponseValueUtils.valuesToPage; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; @@ -329,28 +333,38 @@ public void testChunkResponseSizeRows() { public void testSimpleXContentColumnar() { try (EsqlQueryResponse response = simple(true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""")); } } + public void testSimpleXContentColumnarDropNulls() { + try (EsqlQueryResponse response = simple(true)) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo(""" + {"all_columns":[{"name":"foo","type":"integer"}],"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""") + ); + } + } + public void testSimpleXContentColumnarAsync() { try (EsqlQueryResponse response = simple(true, true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""")); } } public void testSimpleXContentRows() { try (EsqlQueryResponse response = simple(false)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); } } public void testSimpleXContentRowsAsync() { try (EsqlQueryResponse response = simple(false, true)) { - assertThat(Strings.toString(response), equalTo(""" + assertThat(Strings.toString(wrapAsToXContent(response)), equalTo(""" {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); } } @@ -372,6 +386,58 @@ public void testBasicXContentIdAndRunning() { } } + public void testNullColumnsXContentDropNulls() { + try ( + EsqlQueryResponse response = new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), blockFactory.newConstantNullBlock(2))), + null, + false, + null, + false, + false + ) + ) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo("{" + """ + "all_columns":[{"name":"foo","type":"integer"},{"name":"all_null","type":"integer"}],""" + """ + "columns":[{"name":"foo","type":"integer"}],""" + """ + "values":[[40],[80]]}""") + ); + } + } + + /** + * This is a paranoid test to make sure the {@link Block}s produced by {@link Block.Builder} + * that contain only {@code null} entries are properly recognized by the {@link EsqlQueryResponse#DROP_NULL_COLUMNS_OPTION}. 
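+ * (Builders may fold an all-null result into a specialized representation, for
+ * example a constant-null block, so this covers that path in addition to the
+ * explicit constant-null block case above.)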
+ */ + public void testNullColumnsFromBuilderXContentDropNulls() { + try (IntBlock.Builder b = blockFactory.newIntBlockBuilder(2)) { + b.appendNull(); + b.appendNull(); + try ( + EsqlQueryResponse response = new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "integer"), new ColumnInfo("all_null", "integer")), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock(), b.build())), + null, + false, + null, + false, + false + ) + ) { + assertThat( + Strings.toString(wrapAsToXContent(response), new ToXContent.MapParams(Map.of(DROP_NULL_COLUMNS_OPTION, "true"))), + equalTo("{" + """ + "all_columns":[{"name":"foo","type":"integer"},{"name":"all_null","type":"integer"}],""" + """ + "columns":[{"name":"foo","type":"integer"}],""" + """ + "values":[[40],[80]]}""") + ); + } + } + } + private EsqlQueryResponse simple(boolean columnar) { return simple(columnar, false); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml new file mode 100644 index 0000000000000..0f8dbbb97f57f --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/110_all_null.yml @@ -0,0 +1,263 @@ +--- +setup: + - skip: + version: " - 8.12.99" + reason: "feature added in 8.13" + + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 5 + mappings: + properties: + data: + type: long + data_d: + type: double + count: + type: long + count_d: + type: double + time: + type: long + color: + type: keyword + always_null: + type: keyword + non_null_out_of_match: + type: keyword + - do: + bulk: + index: "test" + refresh: true + body: + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275187, "color": "red" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275188, "color": "blue" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275189, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275190, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275191, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275192, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275193, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275194, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275195, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275196, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275197, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275198, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 
1674835275199, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275200, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275201, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275202, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275203, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275204, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275205, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275206, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275207, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275208, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275209, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275210, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275211, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275212, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275213, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275214, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275215, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275216, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275217, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275218, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 40, "data_d": 1, "count_d": 40, "time": 1674835275219, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275220, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275221, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275222, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 
40, "data_d": 1, "count_d": 40, "time": 1674835275223, "color": "red", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 42, "data_d": 2, "count_d": 42, "time": 1674835275224, "color": "blue", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 1, "count": 44, "data_d": 1, "count_d": 44, "time": 1674835275225, "color": "green", "non_null_out_of_match": "a" } + - { "index": { } } + - { "data": 2, "count": 46, "data_d": 2, "count_d": 46, "time": 1674835275226, "color": "red", "non_null_out_of_match": "a" } + +--- +row wise and keep null: + - do: + esql.query: + drop_null_columns: false + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: false + + - length: {columns: 8} + - match: {columns.0.name: "always_null"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "color"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "count"} + - match: {columns.2.type: "long"} + - match: {columns.3.name: "count_d"} + - match: {columns.3.type: "double"} + - match: {columns.4.name: "data"} + - match: {columns.4.type: "long"} + - match: {columns.5.name: "data_d"} + - match: {columns.5.type: "double"} + - match: {columns.6.name: "non_null_out_of_match"} + - match: {columns.6.type: "keyword"} + - match: {columns.7.name: "time"} + - match: {columns.7.type: "long"} + - length: {values: 2} + - length: {values.0: 8} + - is_false: values.0.0 + - match: {values.0.1: red} + +--- +row wise and drop null: + - do: + esql.query: + drop_null_columns: true + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: false + + - length: {all_columns: 8} + - match: {all_columns.0.name: "always_null"} + - match: {all_columns.0.type: "keyword"} + - match: {all_columns.1.name: "color"} + - match: {all_columns.1.type: "keyword"} + - match: {all_columns.2.name: "count"} + - match: {all_columns.2.type: "long"} + - match: {all_columns.3.name: "count_d"} + - match: {all_columns.3.type: "double"} + - match: {all_columns.4.name: "data"} + - match: {all_columns.4.type: "long"} + - match: {all_columns.5.name: "data_d"} + - match: {all_columns.5.type: "double"} + - match: {all_columns.6.name: "non_null_out_of_match"} + - match: {all_columns.6.type: "keyword"} + - match: {all_columns.7.name: "time"} + - match: {all_columns.7.type: "long"} + - length: {columns: 6} + - match: {columns.0.name: "color"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "count"} + - match: {columns.1.type: "long"} + - match: {columns.2.name: "count_d"} + - match: {columns.2.type: "double"} + - match: {columns.3.name: "data"} + - match: {columns.3.type: "long"} + - match: {columns.4.name: "data_d"} + - match: {columns.4.type: "double"} + - match: {columns.5.name: "time"} + - match: {columns.5.type: "long"} + - length: {values: 2} + - length: {values.0: 6} + - match: {values.0.0: red} + +--- +columnar and keep null: + - do: + esql.query: + drop_null_columns: false + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: true + + - length: {columns: 8} + - match: {columns.0.name: "always_null"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "color"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "count"} + - match: {columns.2.type: "long"} + - match: {columns.3.name: "count_d"} + - match: {columns.3.type: "double"} + - match: {columns.4.name: "data"} + - match: {columns.4.type: "long"} + - match: {columns.5.name: 
"data_d"} + - match: {columns.5.type: "double"} + - match: {columns.6.name: "non_null_out_of_match"} + - match: {columns.6.type: "keyword"} + - match: {columns.7.name: "time"} + - match: {columns.7.type: "long"} + - length: {values: 8} + - length: {values.0: 2} + - is_false: values.0.0 + - match: {values.1.0: red} + +--- +columnar and drop null: + - do: + esql.query: + drop_null_columns: true + body: + query: 'FROM test | WHERE time <= 1674835275188 | SORT time ASC | LIMIT 2' + columnar: true + + - length: {all_columns: 8} + - match: {all_columns.0.name: "always_null"} + - match: {all_columns.0.type: "keyword"} + - match: {all_columns.1.name: "color"} + - match: {all_columns.1.type: "keyword"} + - match: {all_columns.2.name: "count"} + - match: {all_columns.2.type: "long"} + - match: {all_columns.3.name: "count_d"} + - match: {all_columns.3.type: "double"} + - match: {all_columns.4.name: "data"} + - match: {all_columns.4.type: "long"} + - match: {all_columns.5.name: "data_d"} + - match: {all_columns.5.type: "double"} + - match: {all_columns.6.name: "non_null_out_of_match"} + - match: {all_columns.6.type: "keyword"} + - match: {all_columns.7.name: "time"} + - match: {all_columns.7.type: "long"} + - length: {columns: 6} + - match: {columns.0.name: "color"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "count"} + - match: {columns.1.type: "long"} + - match: {columns.2.name: "count_d"} + - match: {columns.2.type: "double"} + - match: {columns.3.name: "data"} + - match: {columns.3.type: "long"} + - match: {columns.4.name: "data_d"} + - match: {columns.4.type: "double"} + - match: {columns.5.name: "time"} + - match: {columns.5.type: "long"} + - length: {values: 6} + - length: {values.0: 2} + - match: {values.0.0: red} From afd915af1e767495baede210c17b5769ea5b11b4 Mon Sep 17 00:00:00 2001 From: Gareth Ellis Date: Wed, 17 Jan 2024 15:27:12 +0100 Subject: [PATCH 78/95] Adding threadpool metrics (#102371) This implements metrics for the threadpools. The aim is to emit metrics for the various threadpools, the metric callback should be created when the threadpool is created, and removed before the threadpool is shutdown. The PR also includes a test for the new metrics, and some additions to the metrics test plugin. 
Finally, the metric name check has been modified to allow some of the
non-compliant thread pool names (names that are too long or that contain `-`).

---------

Co-authored-by: Elastic Machine
Co-authored-by: Przemyslaw Gomulka
---
 ...sAvailabilityHealthIndicatorBenchmark.java | 3 +-
 docs/changelog/102371.yaml | 5 +
 .../apm/internal/MetricNameValidator.java | 25 +++++
 .../internal/MetricNameValidatorTests.java | 8 ++
 .../ingest/geoip/GeoIpDownloaderTests.java | 3 +-
 .../Netty4SizeHeaderFrameDecoderTests.java | 3 +-
 .../threadpool/SimpleThreadPoolIT.java | 78 +++++++++++++++
 .../EsRejectedExecutionHandler.java | 11 +++
 .../elasticsearch/node/NodeConstruction.java | 21 ++--
 .../elasticsearch/threadpool/ThreadPool.java | 97 ++++++++++++++++++-
 .../TransportMultiSearchActionTests.java | 5 +-
 .../search/TransportSearchActionTests.java | 3 +-
 .../TransportActionFilterChainTests.java | 6 +-
 .../AbstractClientHeadersTestCase.java | 3 +-
 .../http/HttpClientStatsTrackerTests.java | 3 +-
 .../threadpool/FixedThreadPoolTests.java | 3 +-
 .../threadpool/ScalingThreadPoolTests.java | 3 +-
 .../ScheduleWithFixedDelayTests.java | 11 ++-
 .../ThreadPoolSerializationTests.java | 3 +-
 .../UpdateThreadPoolSettingsTests.java | 20 ++--
 .../ClusterConnectionManagerTests.java | 3 +-
 .../telemetry/MetricRecorder.java | 7 ++
 .../telemetry/TestTelemetryPlugin.java | 13 +++
 .../threadpool/TestThreadPool.java | 3 +-
 .../authc/AuthenticationServiceTests.java | 1 +
 .../security/authc/TokenServiceTests.java | 2 +
 ...InternalEnrollmentTokenGeneratorTests.java | 2 +
 .../apikey/RestCreateApiKeyActionTests.java | 3 +-
 .../apikey/RestGetApiKeyActionTests.java | 3 +-
 .../RestInvalidateApiKeyActionTests.java | 3 +-
 .../apikey/RestQueryApiKeyActionTests.java | 3 +-
 .../SecurityNetty4HeaderSizeLimitTests.java | 3 +-
 32 files changed, 324 insertions(+), 36 deletions(-)
 create mode 100644 docs/changelog/102371.yaml

diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
index ef834fad424e3..8c5de05a01648 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.SystemIndices;
 import org.elasticsearch.tasks.TaskManager;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.BenchmarkMode;
@@ -166,7 +167,7 @@ public void setUp() throws Exception {
 .build();
 Settings settings = Settings.builder().put("node.name", ShardsAvailabilityHealthIndicatorBenchmark.class.getSimpleName()).build();
- ThreadPool threadPool = new ThreadPool(settings);
+ ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP);
 ClusterService clusterService = new ClusterService(
 Settings.EMPTY,
diff --git a/docs/changelog/102371.yaml b/docs/changelog/102371.yaml
new file mode 100644
index 0000000000000..5a698bc9d671a
--- /dev/null
+++ b/docs/changelog/102371.yaml
@@ -0,0 +1,5 @@
+pr: 102371
+summary: Adding threadpool metrics
+area: Infra/Core
+type: enhancement
+issues: []
diff --git
a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java index 1a698b778687c..9ab7412426db8 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java @@ -32,6 +32,13 @@ public class MetricNameValidator { static final int MAX_ELEMENT_LENGTH = 30; static final int MAX_NUMBER_OF_ELEMENTS = 10; + static final Set SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC = Set.of( + "searchable_snapshots_cache_fetch_async", + "searchable_snapshots_cache_prewarming", + "security-token-key", + "security-crypto" + ); + private MetricNameValidator() {} /** @@ -42,6 +49,10 @@ private MetricNameValidator() {} */ public static String validate(String metricName) { Objects.requireNonNull(metricName); + + if (skipValidationToBWC(metricName)) { + return metricName; + } validateMaxMetricNameLength(metricName); String[] elements = metricName.split("\\."); @@ -53,6 +64,19 @@ public static String validate(String metricName) { return metricName; } + /** + * Due to backwards compatibility some metric names would have to skip validation. + * This is for instance where a threadpool name is too long, or contains `-` + * We want to allow to easily find threadpools in code base that are alerting with a metric + * as well as find thread pools metrics in dashboards with their codebase names. + * Renaming a threadpool name would be a breaking change. + * + * NOTE: only allow skipping validation if a refactor in codebase would cause a breaking change + */ + private static boolean skipValidationToBWC(String metricName) { + return SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC.stream().anyMatch(m -> metricName.contains(m)); + } + private static void validateMaxMetricNameLength(String metricName) { if (metricName.length() > MAX_METRIC_NAME_LENGTH) { throw new IllegalArgumentException( @@ -108,6 +132,7 @@ private static void hasESPrefix(String[] elements, String name) { private static void perElementValidations(String[] elements, String name) { for (String element : elements) { + hasOnlyAllowedCharacters(element, name); hasNotBreachLengthLimit(element, name); } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java index 64f78d0af494c..9a5479cc65a93 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java @@ -78,6 +78,13 @@ public void testLastElementAllowList() { expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.somemodule.somemetric.some_other_suffix")); } + public void testSkipValidationDueToBWC() { + for (String partOfMetricName : MetricNameValidator.SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC) { + MetricNameValidator.validate("es.threadpool." 
+ partOfMetricName + ".total");// fake metric name, but with the part that skips + // validation + } + } + public static String metricNameWithLength(int length) { int prefixAndSuffix = "es.".length() + ".utilization".length(); assert length > prefixAndSuffix : "length too short"; @@ -99,4 +106,5 @@ public static String metricNameWithLength(int length) { metricName.append("utilization"); return metricName.toString(); } + } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index baf3006378054..915d54c91b259 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; @@ -78,7 +79,7 @@ public class GeoIpDownloaderTests extends ESTestCase { public void setup() { httpClient = mock(HttpClient.class); clusterService = mock(ClusterService.class); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 224436a388ce5..3e74a74dbd49c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportSettings; @@ -51,7 +52,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); nettyTransport = new Netty4Transport( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index 841f77ea7efab..e8950fbb2f9c6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -11,6 +11,11 @@ 
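 // (TestTelemetryPlugin records instrument registrations and measurements in
 // memory, which is what lets the new testThreadPoolMetrics below assert on
 // the per-pool gauges and counters.)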
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.InstrumentType; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -19,12 +24,18 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; +import java.util.ArrayList; +import java.util.Collection; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.in; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SimpleThreadPoolIT extends ESIntegTestCase { @@ -33,6 +44,11 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder().build(); } + @Override + protected Collection> nodePlugins() { + return List.of(TestTelemetryPlugin.class); + } + public void testThreadNames() throws Exception { ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); Set preNodeStartThreadNames = new HashSet<>(); @@ -95,4 +111,66 @@ public void testThreadNames() throws Exception { } } + public void testThreadPoolMetrics() throws Exception { + internalCluster().startNode(); + + final String dataNodeName = internalCluster().getRandomNodeName(); + final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNodeName) + .filterPlugins(TestTelemetryPlugin.class) + .findFirst() + .orElseThrow(); + + logger.info("do some indexing, flushing, optimize, and searches"); + int numDocs = randomIntBetween(2, 100); + IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; ++i) { + builders[i] = prepareIndex("idx").setSource( + jsonBuilder().startObject() + .field("str_value", "s" + i) + .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) + .field("l_value", i) + .array("l_values", new int[] { i * 2, i * 2 + 1 }) + .field("d_value", i) + .array("d_values", new double[] { i * 2, i * 2 + 1 }) + .endObject() + ); + } + indexRandom(true, builders); + int numSearches = randomIntBetween(2, 100); + for (int i = 0; i < numSearches; i++) { + assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("str_value", "s" + i))); + assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("l_value", i))); + } + plugin.collect(); + final var tp = internalCluster().getInstance(ThreadPool.class, dataNodeName); + ThreadPoolStats tps = tp.stats(); + ArrayList registeredMetrics = plugin.getRegisteredMetrics(InstrumentType.LONG_GAUGE); + registeredMetrics.addAll(plugin.getRegisteredMetrics(InstrumentType.LONG_ASYNC_COUNTER)); + tps.forEach(stats -> { + Map threadPoolMetrics = Map.of( + ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED, + stats.completed(), + ThreadPool.THREAD_POOL_METRIC_NAME_ACTIVE, + (long) stats.active(), + ThreadPool.THREAD_POOL_METRIC_NAME_CURRENT, + (long) 
stats.threads(), + ThreadPool.THREAD_POOL_METRIC_NAME_LARGEST, + (long) stats.largest(), + ThreadPool.THREAD_POOL_METRIC_NAME_QUEUE, + (long) stats.queue() + ); + threadPoolMetrics.forEach((suffix, value) -> { + String metricName = ThreadPool.THREAD_POOL_METRIC_PREFIX + stats.name() + suffix; + List measurements; + if (suffix.equals(ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED)) { + measurements = plugin.getLongAsyncCounterMeasurement(metricName); + } else { + measurements = plugin.getLongGaugeMeasurement(metricName); + } + assertThat(metricName, in(registeredMetrics)); + assertThat(measurements.get(0).value(), equalTo(value)); + }); + }); + } + } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java index 3878a4a2dff9d..9457773eb8071 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java @@ -9,6 +9,8 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.metrics.CounterMetric; +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ThreadPoolExecutor; @@ -16,6 +18,7 @@ public abstract class EsRejectedExecutionHandler implements RejectedExecutionHandler { private final CounterMetric rejected = new CounterMetric(); + private LongCounter rejectionCounter = null; /** * The number of rejected executions. @@ -26,6 +29,14 @@ public long rejected() { protected void incrementRejections() { rejected.inc(); + if (rejectionCounter != null) { + rejectionCounter.increment(); + } + } + + public void registerCounter(MeterRegistry meterRegistry, String prefix, String name) { + rejectionCounter = meterRegistry.registerLongCounter(prefix + ".rejected.total", "number of rejected threads for " + name, "count"); + rejectionCounter.incrementBy(rejected()); } protected static EsRejectedExecutionException newRejectedException( diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index aa62ea689a5a9..3cad0232cb2cf 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -183,6 +183,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -241,8 +242,8 @@ static NodeConstruction prepareConstruction( NodeConstruction constructor = new NodeConstruction(closeables); Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); - - ThreadPool threadPool = constructor.createThreadPool(settings); + TelemetryProvider telemetryProvider = constructor.createTelemetryProvider(settings); + ThreadPool threadPool = constructor.createThreadPool(settings, telemetryProvider.getMeterRegistry()); SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); SearchModule searchModule = 
constructor.createSearchModule(settingsModule.getSettings(), threadPool); @@ -257,7 +258,8 @@ static NodeConstruction prepareConstruction( scriptService, constructor.createAnalysisRegistry(), serviceProvider, - forbidPrivateIndexSettings + forbidPrivateIndexSettings, + telemetryProvider ); return constructor; @@ -448,9 +450,14 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr return settings; } - private ThreadPool createThreadPool(Settings settings) throws IOException { + private TelemetryProvider createTelemetryProvider(Settings settings) { + return getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)).orElse(TelemetryProvider.NOOP); + } + + private ThreadPool createThreadPool(Settings settings, MeterRegistry meterRegistry) throws IOException { ThreadPool threadPool = new ThreadPool( settings, + meterRegistry, pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); @@ -580,13 +587,12 @@ private void construct( ScriptService scriptService, AnalysisRegistry analysisRegistry, NodeServiceProvider serviceProvider, - boolean forbidPrivateIndexSettings + boolean forbidPrivateIndexSettings, + TelemetryProvider telemetryProvider ) throws IOException { Settings settings = settingsModule.getSettings(); - TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) - .orElse(TelemetryProvider.NOOP); modules.bindToInstance(Tracer.class, telemetryProvider.getTracer()); TaskManager taskManager = new TaskManager( @@ -598,6 +604,7 @@ private void construct( ).collect(Collectors.toSet()), telemetryProvider.getTracer() ); + final Tracer tracer = telemetryProvider.getTracer(); ClusterService clusterService = createClusterService(settingsModule, threadPool, taskManager); clusterService.addStateApplier(scriptService); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 17cafaee19bb4..fef0d93ec86cc 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -29,6 +29,11 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.node.ReportingService; +import org.elasticsearch.telemetry.metric.Instrument; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; +import org.elasticsearch.telemetry.metric.LongGauge; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -85,6 +90,13 @@ public static class Names { public static final String SYSTEM_CRITICAL_WRITE = "system_critical_write"; } + public static final String THREAD_POOL_METRIC_PREFIX = "es.thread_pool."; + public static final String THREAD_POOL_METRIC_NAME_COMPLETED = ".threads.completed.total"; + public static final String THREAD_POOL_METRIC_NAME_CURRENT = ".threads.count.current"; + public static final String THREAD_POOL_METRIC_NAME_QUEUE = ".threads.queue.size"; + public static final String THREAD_POOL_METRIC_NAME_ACTIVE = ".threads.active.current"; + public static final String THREAD_POOL_METRIC_NAME_LARGEST = ".threads.largest.current"; + public enum ThreadPoolType { DIRECT("direct"), 
FIXED("fixed"), @@ -153,6 +165,8 @@ public static ThreadPoolType fromType(String type) { private final long slowSchedulerWarnThresholdNanos; + private Map> instruments; + @SuppressWarnings("rawtypes") public Collection builders() { return Collections.unmodifiableCollection(builders.values()); @@ -180,7 +194,7 @@ public Collection builders() { ); @SuppressWarnings({ "rawtypes", "unchecked" }) - public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { + public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final ExecutorBuilder... customBuilders) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -189,6 +203,7 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); + final Map> instruments = new HashMap<>(); builders.put( Names.GENERIC, @@ -307,7 +322,8 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui executors.put(Names.SAME, new ExecutorHolder(EsExecutors.DIRECT_EXECUTOR_SERVICE, new Info(Names.SAME, ThreadPoolType.DIRECT))); this.executors = Map.copyOf(executors); - + this.executors.forEach((k, v) -> instruments.put(k, setupMetrics(meterRegistry, k, v))); + this.instruments = instruments; final List infos = executors.values() .stream() .filter(holder -> holder.info.getName().equals("same") == false) @@ -324,6 +340,59 @@ public ThreadPool(final Settings settings, final ExecutorBuilder... customBui this.cachedTimeThread.start(); } + private static ArrayList setupMetrics(MeterRegistry meterRegistry, String name, ExecutorHolder holder) { + Map at = Map.of(); + ArrayList instruments = new ArrayList<>(); + if (holder.executor() instanceof ThreadPoolExecutor threadPoolExecutor) { + String prefix = THREAD_POOL_METRIC_PREFIX + name; + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_CURRENT, + "number of threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getPoolSize(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_QUEUE, + "queue size for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getQueue().size(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_ACTIVE, + "number of active threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getActiveCount(), at) + ) + ); + instruments.add( + meterRegistry.registerLongGauge( + prefix + THREAD_POOL_METRIC_NAME_LARGEST, + "largest pool size for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getLargestPoolSize(), at) + ) + ); + instruments.add( + meterRegistry.registerLongAsyncCounter( + prefix + THREAD_POOL_METRIC_NAME_COMPLETED, + "number of completed threads for " + name, + "count", + () -> new LongWithAttributes(threadPoolExecutor.getCompletedTaskCount(), at) + ) + ); + RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); + if (rejectedExecutionHandler instanceof EsRejectedExecutionHandler handler) { + handler.registerCounter(meterRegistry, prefix, name); + } + } + return instruments; + }
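The constructor above registers, per executor, a set of instruments whose callbacks read live ThreadPoolExecutor state at collection time. A compact sketch of that callback-gauge idea in plain Java (the Gauge record and metric names below are illustrative; the real registration goes through the MeterRegistry calls shown above):

import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.function.LongSupplier;

class GaugeSketch {
    // A gauge is just a named supplier that is polled at collection time,
    // so every reading reflects the executor's state at that moment.
    record Gauge(String name, LongSupplier value) {}

    public static void main(String[] args) {
        ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(4);
        Gauge active = new Gauge("es.thread_pool.generic.threads.active.current", pool::getActiveCount);
        Gauge queue = new Gauge("es.thread_pool.generic.threads.queue.size", () -> pool.getQueue().size());
        System.out.println(active.name() + " = " + active.value().getAsLong());
        System.out.println(queue.name() + " = " + queue.value().getAsLong());
        pool.shutdown();
    }
}

+ // for subclassing by tests that don't actually use any of the machinery that the 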
regular constructor sets up protected ThreadPool() { this.builders = Map.of(); @@ -541,11 +610,33 @@ protected final void stopCachedTimeThread() { cachedTimeThread.interrupt(); } + private void closeMetrics(ExecutorHolder executor) { + if (this.instruments.containsKey(executor.info.getName())) { + this.instruments.get(executor.info.getName()).forEach((instrument) -> { + if (instrument instanceof LongAsyncCounter longasynccounter) { + try { + longasynccounter.close(); + } catch (Exception e) { + logger.warn(format("Failed to close LongAsyncCounter for %s. %s", executor.info.getName(), e.getMessage()), e); + } + } else if (instrument instanceof LongGauge longgauge) { + try { + longgauge.close(); + } catch (Exception e) { + logger.warn(format("Failed to close LongGauge for %s. %s", executor.info.getName(), e.getMessage()), e); + } + } + }); + } + this.instruments.remove(executor.info.getName()); + } + public void shutdown() { stopCachedTimeThread(); scheduler.shutdown(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); executor.executor().shutdown(); } } @@ -556,6 +647,7 @@ public void shutdownNow() { scheduler.shutdownNow(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); executor.executor().shutdownNow(); } } @@ -565,6 +657,7 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE boolean result = scheduler.awaitTermination(timeout, unit); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { + closeMetrics(executor); result &= executor.executor().awaitTermination(timeout, unit); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index fb27d824417b1..d04e41c83699d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -52,7 +53,7 @@ public void testParentTaskId() throws Exception { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); try { TransportService transportService = new TransportService( Settings.EMPTY, @@ -120,7 +121,7 @@ public void testBatchExecute() throws ExecutionException, InterruptedException { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); TransportService 
transportService = new TransportService( Settings.EMPTY, mock(Transport.class), diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 2271821fc07da..e0eed9daa97f6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -76,6 +76,7 @@ import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.telemetry.TelemetryProvider; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -1696,7 +1697,7 @@ public void testCCSCompatibilityCheck() throws Exception { ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); - ThreadPool threadPool = new ThreadPool(settings); + ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 64ab7a9819190..82c204b1d0b88 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -50,7 +51,10 @@ public class TransportActionFilterChainTests extends ESTestCase { @Before public void init() throws Exception { counter = new AtomicInteger(); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build()); + threadPool = new ThreadPool( + Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build(), + MeterRegistry.NOOP + ); } @After diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 5175fee7edceb..97c52ef2edc37 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -76,7 +77,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); client = buildClient(settings, ACTIONS); } diff --git a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java index 99e99540489c5..2dfaaf34bb1f1 100644 --- a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -437,7 +438,7 @@ private static class FakeTimeThreadPool extends ThreadPool { private final long absoluteTimeOffset = randomLong(); FakeTimeThreadPool() { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); stopCachedTimeThread(); setRandomTime(); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 5c355c8009d54..6be78f27135a5 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; @@ -33,7 +34,7 @@ public void testRejectedExecutionCounter() throws InterruptedException { .put("thread_pool." 
+ threadPoolName + ".queue_size", queueSize) .build(); try { - threadPool = new ThreadPool(nodeSettings); + threadPool = new ThreadPool(nodeSettings, MeterRegistry.NOOP); // these tasks will consume the thread pool causing further // submissions to queue diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index 8d7a486ee79f0..9a0c5c4b75d54 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.hamcrest.Matcher; import java.util.HashMap; @@ -424,7 +425,7 @@ public void runScalingThreadPoolTest(final Settings settings, final BiConsumer getMeasurements(InstrumentType instrumentType, String n return metrics.get(instrumentType).called.getOrDefault(Objects.requireNonNull(name), Collections.emptyList()); } + public ArrayList getRegisteredMetrics(InstrumentType instrumentType) { + ArrayList registeredMetrics = new ArrayList<>(); + metrics.get(instrumentType).instruments.forEach((name, registration) -> { registeredMetrics.add(name); }); + return registeredMetrics; + } + /** * Get the {@link Registration} for a given elasticsearch {@link Instrument}. */ diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java index e237f6c9bbb4b..a4c73634dc102 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java @@ -15,6 +15,7 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; +import java.util.ArrayList; import java.util.List; /** @@ -41,6 +42,10 @@ public List getLongCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_COUNTER, name); } + public List getLongAsyncCounterMeasurement(String name) { + return meter.getRecorder().getMeasurements(InstrumentType.LONG_ASYNC_COUNTER, name); + } + public List getDoubleUpDownCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.DOUBLE_UP_DOWN_COUNTER, name); } @@ -65,10 +70,18 @@ public List getLongHistogramMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_HISTOGRAM, name); } + public void collect() { + meter.getRecorder().collect(); + } + public void resetMeter() { meter.getRecorder().resetCalls(); } + public ArrayList getRegisteredMetrics(InstrumentType instrumentType) { + return meter.getRecorder().getRegisteredMetrics(instrumentType); + } + @Override public TelemetryProvider getTelemetryProvider(Settings settings) { return new TelemetryProvider() { diff --git a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java index e8a853989e8e5..ce8e3a2574f3e 100644 --- a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Releasable; import org.elasticsearch.node.Node; +import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -30,7 +31,7 @@ public TestThreadPool(String name, ExecutorBuilder... customBuilders) { } public TestThreadPool(String name, Settings settings, ExecutorBuilder... customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), MeterRegistry.NOOP, customBuilders); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index e9a252553fe8d..3c6f7462c0bb4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -265,6 +265,7 @@ public void init() throws Exception { client = mock(Client.class); threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index ff3acbc122501..f2cb09a2c9d3d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -62,6 +62,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -269,6 +270,7 @@ public void tearDown() throws Exception { public static void startThreadPool() throws IOException { threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 2abbb6a610170..3a4e5a404eace 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.node.Node; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -84,6 +85,7 @@ public static void startThreadPool() throws IOException { final Settings settings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), 
"InternalEnrollmentTokenGeneratorTests").build(); threadPool = new ThreadPool( settings, + MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 791aba46c92ea..0ab9533e62d4c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -54,7 +55,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index a1f696cc5dddd..2ee42b360f02a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -60,7 +61,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 3c0e24da32763..8bbd051c2fc32 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -53,7 +54,7 @@ public void setUp() throws 
Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index 67d2ab006eb22..4f14d8414ebca 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -58,7 +59,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java index c87ddd116b138..8c422342c3640 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -77,7 +78,7 @@ public final class SecurityNetty4HeaderSizeLimitTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings); + threadPool = new ThreadPool(settings, MeterRegistry.NOOP); TaskManager taskManager = new TaskManager(settings, threadPool, Collections.emptySet()); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); From 8203334ce62673a1eed24e64c450efb3632d4661 Mon Sep 17 00:00:00 2001 From: Rene Groeschke Date: Wed, 17 Jan 2024 15:44:22 +0100 Subject: [PATCH 79/95] Enable SymbolicLinkPreservingTarFuncTest (#104452) Addressing #104428 --- .../gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy | 2 -- 1 file changed, 2 deletions(-) diff --git a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy index 3b05a2753f216..237aa99e4b824 100644 --- 
a/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy +++ b/build-tools-internal/src/integTest/groovy/org/elasticsearch/gradle/internal/SymbolicLinkPreservingTarFuncTest.groovy @@ -23,7 +23,6 @@ import java.nio.file.Path import java.nio.file.Paths import java.util.function.Function -@Ignore("https://github.com/elastic/elasticsearch/issues/104428") class SymbolicLinkPreservingTarFuncTest extends AbstractGradleFuncTest { def setup() { @@ -131,7 +130,6 @@ tasks.register("buildTar", SymbolicLinkPreservingTar) { SymbolicLinkPreservingTa while (entry != null) { if (entry.getName().equals("real-folder/")) { assert entry.isDirectory() - assert entry.getMode() == 16877 realFolderEntry = true } else if (entry.getName().equals("real-folder/file")) { assert entry.isFile() From 2b14ff8efc28536f363a762b07a3ea6007d7433a Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 17 Jan 2024 15:55:15 +0100 Subject: [PATCH 80/95] [s3-repository] Refresh Web Identity Credentials when Web Identity token changes (#104135) STSAssumeRoleWithWebIdentitySessionCredentialsProvider is supposed to automatically refresh the credentials when the current credentials expire and to pick up a new web identity token if it has been rotated, but there have been reports from the field that this doesn't work reliably. To make sure that the credentials do get refreshed, let's create a file watcher that explicitly refreshes the credentials when the web identity token changes. Resolves #101828 --- .../repositories/s3/S3RepositoryPlugin.java | 7 +- .../repositories/s3/S3Service.java | 38 +++++- ...IdentityTokenCredentialsProviderTests.java | 120 +++++++++++++++--- .../s3/RepositoryCredentialsTests.java | 9 +- .../s3/S3BlobContainerRetriesTests.java | 3 +- .../s3/S3ClientSettingsTests.java | 3 +- .../repositories/s3/S3RepositoryTests.java | 7 +- .../repositories/s3/S3ServiceTests.java | 7 +- 8 files changed, 162 insertions(+), 32 deletions(-) diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index ba762537537e3..83668cc271922 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -25,6 +25,7 @@ import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; @@ -84,13 +85,13 @@ protected S3Repository createRepository( @Override public Collection createComponents(PluginServices services) { - service.set(s3Service(services.environment(), services.clusterService().getSettings())); + service.set(s3Service(services.environment(), services.clusterService().getSettings(), services.resourceWatcherService())); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); return List.of(service); } - S3Service s3Service(Environment environment, Settings nodeSettings) { - return new S3Service(environment, nodeSettings); + S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + return new S3Service(environment, nodeSettings, resourceWatcherService); } @Override
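The fix described in the commit message comes down to: watch the token file and call refresh() on the credentials provider whenever it changes. A rough standalone sketch of that pattern using plain java.nio (the patch itself uses Elasticsearch's FileWatcher and ResourceWatcherService, as the S3Service hunk below shows; the Refreshable interface here is a stand-in):

import java.nio.file.*;

class TokenFileWatchSketch {
    // Stand-in for the credentials provider's refresh() method.
    interface Refreshable { void refresh(); }

    static void watch(Path tokenFile, Refreshable credentialsProvider) throws Exception {
        WatchService watchService = tokenFile.getFileSystem().newWatchService();
        tokenFile.getParent().register(watchService, StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY);
        while (true) {
            WatchKey key = watchService.take(); // blocks until the directory reports an event
            for (WatchEvent<?> event : key.pollEvents()) {
                // Only react to changes of the token file itself, not its siblings.
                if (tokenFile.getFileName().equals(event.context())) {
                    credentialsProvider.refresh();
                }
            }
            key.reset();
        }
    }
}

diff --git 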
a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 1fd31047c735a..fc58482651fa3 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -28,6 +28,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.coordination.stateless.StoreHeartbeatService; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Strings; @@ -37,6 +38,9 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; +import org.elasticsearch.watcher.FileChangesListener; +import org.elasticsearch.watcher.FileWatcher; +import org.elasticsearch.watcher.ResourceWatcherService; import java.io.Closeable; import java.io.IOException; @@ -68,7 +72,6 @@ class S3Service implements Closeable { TimeValue.timeValueHours(24), Setting.Property.NodeScope ); - private volatile Map clientsCache = emptyMap(); /** @@ -90,12 +93,13 @@ class S3Service implements Closeable { final TimeValue compareAndExchangeTimeToLive; final TimeValue compareAndExchangeAntiContentionDelay; - S3Service(Environment environment, Settings nodeSettings) { + S3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { webIdentityTokenCredentialsProvider = new CustomWebIdentityTokenCredentialsProvider( environment, System::getenv, System::getProperty, - Clock.systemUTC() + Clock.systemUTC(), + resourceWatcherService ); compareAndExchangeTimeToLive = REPOSITORY_S3_CAS_TTL_SETTING.get(nodeSettings); compareAndExchangeAntiContentionDelay = REPOSITORY_S3_CAS_ANTI_CONTENTION_DELAY_SETTING.get(nodeSettings); @@ -333,7 +337,8 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials Environment environment, SystemEnvironment systemEnvironment, JvmEnvironment jvmEnvironment, - Clock clock + Clock clock, + ResourceWatcherService resourceWatcherService ) { // Check whether the original environment variable exists. 
If it doesn't, // the system doesn't support AWS web identity tokens @@ -395,6 +400,31 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials roleSessionName, webIdentityTokenFileSymlink.toString() ).withStsClient(stsClient).build(); + var watcher = new FileWatcher(webIdentityTokenFileSymlink); + watcher.addListener(new FileChangesListener() { + + @Override + public void onFileCreated(Path file) { + onFileChanged(file); + } + + @Override + public void onFileChanged(Path file) { + if (file.equals(webIdentityTokenFileSymlink)) { + LOGGER.debug("AWS web identity token file [{}] changed, updating credentials", file); + credentialsProvider.refresh(); + } + } + }); + try { + resourceWatcherService.add(watcher, ResourceWatcherService.Frequency.LOW); + } catch (IOException e) { + throw new ElasticsearchException( + "failed to start watching AWS web identity token file [{}]", + e, + webIdentityTokenFileSymlink + ); + } } catch (Exception e) { stsClient.shutdown(); throw e; diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index cecb0cd147897..fb775ab31c04d 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -9,16 +9,21 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; import com.sun.net.httpserver.HttpServer; import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.junit.After; import org.junit.Assert; import org.mockito.Mockito; @@ -36,12 +41,23 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; import java.util.stream.Collectors; public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "aws-sdk-java-1651084775908"; + private final TestThreadPool threadPool = new TestThreadPool("test"); + private final Settings settings = Settings.builder().put("resource.reload.interval.low", TimeValue.timeValueMillis(100)).build(); + private final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); + + @After + public void shutdown() throws Exception { + resourceWatcherService.close(); + threadPool.shutdown(); + } private static Environment getEnvironment() throws IOException { Path configDirectory = createTempDir("web-identity-token-test"); @@ -53,7 +69,7 @@ private static Environment getEnvironment() throws IOException { } @SuppressForbidden(reason = "HTTP server is used for testing") - public 
void testCreateWebIdentityTokenCredentialsProvider() throws Exception { + private static HttpServer getHttpServer(Consumer webIdentityTokenCheck) throws IOException { HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); httpServer.createContext("/", exchange -> { try (exchange) { @@ -62,6 +78,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { .map(e -> e.split("=")) .collect(Collectors.toMap(e -> e[0], e -> URLDecoder.decode(e[1], StandardCharsets.UTF_8))); assertEquals(ROLE_NAME, params.get("RoleSessionName")); + webIdentityTokenCheck.accept(params.get("WebIdentityToken")); exchange.getResponseHeaders().add("Content-Type", "text/xml; charset=UTF-8"); byte[] response = Strings.format( @@ -97,25 +114,41 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { } }); httpServer.start(); + return httpServer; + } - Environment environment = getEnvironment(); - - // No region is set, but the SDK shouldn't fail because of that - Map environmentVariables = Map.of( - "AWS_WEB_IDENTITY_TOKEN_FILE", - "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", - "AWS_ROLE_ARN", - ROLE_ARN - ); - Map systemProperties = Map.of( + @SuppressForbidden(reason = "HTTP server is used for testing") + private static Map getSystemProperties(HttpServer httpServer) { + return Map.of( "com.amazonaws.sdk.stsMetadataServiceEndpointOverride", "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort() ); + } + + private static Map environmentVariables() { + return Map.of("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", "AWS_ROLE_ARN", ROLE_ARN); + } + + private static void assertCredentials(AWSCredentials credentials) { + Assert.assertFalse(credentials.getAWSAccessKeyId().isEmpty()); + Assert.assertFalse(credentials.getAWSSecretKey().isEmpty()); + } + + @SuppressForbidden(reason = "HTTP server is used for testing") + public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { + HttpServer httpServer = getHttpServer(s -> assertEquals("YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl", s)); + + Environment environment = getEnvironment(); + + // No region is set, but the SDK shouldn't fail because of that + Map environmentVariables = environmentVariables(); + Map systemProperties = getSystemProperties(httpServer); var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( environment, environmentVariables::get, systemProperties::getOrDefault, - Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC) + Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC), + resourceWatcherService ); try { AWSCredentials credentials = S3Service.buildCredentials( @@ -124,8 +157,64 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { webIdentityTokenCredentialsProvider ).getCredentials(); - Assert.assertEquals("sts_access_key", credentials.getAWSAccessKeyId()); - Assert.assertEquals("secret_access_key", credentials.getAWSSecretKey()); + assertCredentials(credentials); + } finally { + webIdentityTokenCredentialsProvider.shutdown(); + httpServer.stop(0); + } + } + + private static class DelegatingConsumer implements Consumer { + private Consumer delegate; + + private DelegatingConsumer(Consumer delegate) { + this.delegate = delegate; + } + + private void setDelegate(Consumer delegate) { + this.delegate = delegate; + } + + @Override + public 
void accept(String s) { + delegate.accept(s); + } + } + + @SuppressForbidden(reason = "HTTP server is used for testing") + public void testPickUpNewWebIdentityTokenWhenItsChanged() throws Exception { + DelegatingConsumer webIdentityTokenCheck = new DelegatingConsumer(s -> assertEquals("YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl", s)); + + HttpServer httpServer = getHttpServer(webIdentityTokenCheck); + Environment environment = getEnvironment(); + Map environmentVariables = environmentVariables(); + Map systemProperties = getSystemProperties(httpServer); + var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( + environment, + environmentVariables::get, + systemProperties::getOrDefault, + Clock.fixed(Instant.ofEpochMilli(1651084775908L), ZoneOffset.UTC), + resourceWatcherService + ); + try { + AWSCredentialsProvider awsCredentialsProvider = S3Service.buildCredentials( + LogManager.getLogger(S3Service.class), + S3ClientSettings.getClientSettings(Settings.EMPTY, randomAlphaOfLength(8)), + webIdentityTokenCredentialsProvider + ); + assertCredentials(awsCredentialsProvider.getCredentials()); + + var latch = new CountDownLatch(1); + String newWebIdentityToken = "88f84342080d4671a511e10ae905b2b0"; + webIdentityTokenCheck.setDelegate(s -> { + if (s.equals(newWebIdentityToken)) { + latch.countDown(); + } + }); + Files.writeString(environment.configFile().resolve("repository-s3/aws-web-identity-token-file"), newWebIdentityToken); + + safeAwait(latch); + assertCredentials(awsCredentialsProvider.getCredentials()); } finally { webIdentityTokenCredentialsProvider.shutdown(); httpServer.stop(0); @@ -149,7 +238,8 @@ public void testSupportRegionalizedEndpoints() throws Exception { getEnvironment(), environmentVariables::get, systemProperties::getOrDefault, - Clock.systemUTC() + Clock.systemUTC(), + resourceWatcherService ); // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com" // endpoint in a unit test. 
The client depends on hardcoded RegionalEndpointsOptionResolver that in turn depends diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 085d438618a19..28a48c2968f59 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.security.AccessController; @@ -274,8 +275,8 @@ protected void assertSnapshotOrGenericThread() { } @Override - S3Service s3Service(Environment environment, Settings nodeSettings) { - return new ProxyS3Service(environment, nodeSettings); + S3Service s3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + return new ProxyS3Service(environment, nodeSettings, resourceWatcherService); } public static final class ClientAndCredentials extends AmazonS3Wrapper { @@ -291,8 +292,8 @@ public static final class ProxyS3Service extends S3Service { private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); - ProxyS3Service(Environment environment, Settings nodeSettings) { - super(environment, nodeSettings); + ProxyS3Service(Environment environment, Settings nodeSettings, ResourceWatcherService resourceWatcherService) { + super(environment, nodeSettings, resourceWatcherService); } @Override diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index b090fb3d34814..58c079515aa47 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.watcher.ResourceWatcherService; import org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; @@ -92,7 +93,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes @Before public void setUp() throws Exception { shouldErrorOnDns = new AtomicBoolean(false); - service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY) { + service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class)) { @Override protected AmazonS3ClientBuilder buildClientBuilder(S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = super.buildClientBuilder(clientSettings); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java index c48e0dc337d30..31bfd3a5e157f 100644 --- 
a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ClientSettingsTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.Mockito; import java.io.IOException; @@ -178,7 +179,7 @@ public void testRegionCanBeSet() throws IOException { ); assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").region, is(region)); - try (S3Service s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY)) { + try (var s3Service = new S3Service(Mockito.mock(Environment.class), Settings.EMPTY, Mockito.mock(ResourceWatcherService.class))) { AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")); assertThat(other.getSignerRegionOverride(), is(region)); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index ab5edc4608bfd..0a92ed0a28973 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.hamcrest.Matchers; import org.mockito.Mockito; @@ -45,8 +46,8 @@ public void shutdown() { private static class DummyS3Service extends S3Service { - DummyS3Service(Environment environment) { - super(environment, Settings.EMPTY); + DummyS3Service(Environment environment, ResourceWatcherService resourceWatcherService) { + super(environment, Settings.EMPTY, resourceWatcherService); } @Override @@ -125,7 +126,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { return new S3Repository( metadata, NamedXContentRegistry.EMPTY, - new DummyS3Service(Mockito.mock(Environment.class)), + new DummyS3Service(Mockito.mock(Environment.class), Mockito.mock(ResourceWatcherService.class)), BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java index bbdeea6d87631..33e56bcf2180b 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3ServiceTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.watcher.ResourceWatcherService; import org.mockito.Mockito; import java.io.IOException; @@ -18,7 +19,11 @@ public class S3ServiceTests extends ESTestCase { public void testCachedClientsAreReleased() throws IOException { - final S3Service s3Service = new 
S3Service(Mockito.mock(Environment.class), Settings.EMPTY); + final S3Service s3Service = new S3Service( + Mockito.mock(Environment.class), + Settings.EMPTY, + Mockito.mock(ResourceWatcherService.class) + ); final Settings settings = Settings.builder().put("endpoint", "http://first").build(); final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); From 73a68409c231952e3f19dc9b4874f2974c6cc6ca Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 17 Jan 2024 16:16:39 +0100 Subject: [PATCH 81/95] Ref count search response bytes (#103763) Final step in #102030 ... actually makes `SearchHit` read a releasable bytes reference. It still falls back to copying into unrolled buffers here and there; those copies can be removed in follow-ups where it's worth the effort (aggs probably being the most important spot). It is hard to create very reliable benchmarks for this because all our macro-benchmarks are quite noisy, but running http logs and PMC there is a statistically significant reduction in GC as well as reduced tail latencies in most benchmarks. The overhead of ref-counting these bytes is not visible in profiling as far as I can tell, and for large source values no correspondingly large `byte[]` copies are created any longer outside of the few remaining spots where we copy to pooled buffers. (A short sketch of the resulting ref-counting pattern follows below.) closes #102657 closes #102030 --- docs/changelog/103763.yaml | 6 + .../geoip/DatabaseNodeServiceTests.java | 4 +- .../PercolatorHighlightSubFetchPhase.java | 2 +- ...rcolatorMatchedSlotSubFetchPhaseTests.java | 6 +- .../index/rankeval/RatedSearchHit.java | 4 +- ...rollDocumentsAfterConflictsIntegTests.java | 2 +- .../reindex/AsyncBulkByScrollActionTests.java | 4 +- .../ClientScrollableHitSourceTests.java | 4 +- .../functionscore/RandomScoreFunctionIT.java | 2 +- .../search/AbstractSearchAsyncAction.java | 2 +- .../search/ArraySearchPhaseResults.java | 42 +- .../search/CanMatchPreFilterSearchPhase.java | 19 +- .../CountOnlyQueryPhaseResultConsumer.java | 17 +- .../action/search/CountedCollector.java | 7 +- .../action/search/DfsQueryPhase.java | 2 +- .../action/search/ExpandSearchPhase.java | 1 + .../action/search/FetchSearchPhase.java | 25 +- .../search/QueryPhaseResultConsumer.java | 6 +- .../SearchDfsQueryThenFetchAsyncAction.java | 3 +- .../action/search/SearchPhaseController.java | 13 +- .../action/search/SearchPhaseResults.java | 4 +- .../action/search/SearchResponse.java | 39 +- .../action/search/SearchResponseMerger.java | 59 +- .../action/search/SearchResponseSections.java | 3 +- .../bytes/ReleasableBytesReference.java | 4 +- .../reindex/ClientScrollableHitSource.java | 4 +- .../org/elasticsearch/search/SearchHit.java | 129 ++++- .../org/elasticsearch/search/SearchHits.java | 118 +++- .../aggregations/metrics/InternalTopHits.java | 7 +- .../search/fetch/FetchPhase.java | 10 +- .../search/fetch/FetchPhaseDocsIterator.java | 5 + .../search/fetch/FetchSearchResult.java | 12 +- .../search/fetch/subphase/InnerHitsPhase.java | 4 +- .../completion/CompletionSuggestion.java | 6 +- .../AbstractSearchAsyncActionTests.java | 10 +- ...ountOnlyQueryPhaseResultConsumerTests.java | 27 +- .../action/search/CountedCollectorTests.java | 5 +- .../action/search/DfsQueryPhaseTests.java | 69 ++- .../action/search/ExpandSearchPhaseTests.java | 124 ++-- .../search/FetchLookupFieldsPhaseTests.java | 5 + .../action/search/FetchSearchPhaseTests.java | 164 +++--- .../action/search/MockSearchPhaseContext.java | 9 +-
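The pattern this patch threads through `SearchResponse`, `SearchHits` and `SearchHit` is delegation to a leak-tracked `AbstractRefCounted` whose `closeInternal()` releases the pooled children exactly once. The following is a minimal sketch of that delegation, assuming only the `org.elasticsearch.core.AbstractRefCounted`/`RefCounted` and `org.elasticsearch.transport.LeakTracker` helpers visible in the diffs; `PooledHolder` is a hypothetical class for illustration, not part of this patch.

import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.transport.LeakTracker;

// Hypothetical holder mirroring the delegation pattern SearchResponse adopts below.
public final class PooledHolder implements RefCounted {
    private final SearchHits hits;
    // LeakTracker.wrap flags holders that get garbage collected without ever reaching zero references.
    private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() {
        @Override
        protected void closeInternal() {
            hits.decRef(); // last reference released: free the pooled hits exactly once
        }
    });

    PooledHolder(SearchHits hits) {
        this.hits = hits;
        hits.incRef(); // take our own reference, as SearchResponse's constructor does
    }

    SearchHits hits() {
        assert hasReferences(); // reads after release are bugs, same assertion style as the diffs
        return hits;
    }

    @Override
    public void incRef() {
        refCounted.incRef();
    }

    @Override
    public boolean tryIncRef() {
        return refCounted.tryIncRef();
    }

    @Override
    public boolean decRef() {
        return refCounted.decRef();
    }

    @Override
    public boolean hasReferences() {
        return refCounted.hasReferences();
    }
}

A caller that is done with such a holder simply calls decRef() in a finally block, or registers holder::decRef as a releasable, which is the shape most call sites in the diffs below take.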
.../search/QueryPhaseResultConsumerTests.java | 31 +- .../action/search/SearchAsyncActionTests.java | 14 +- .../search/SearchPhaseControllerTests.java | 263 +++++---- .../SearchQueryThenFetchAsyncActionTests.java | 25 +- .../search/SearchResponseMergerTests.java | 542 +++++++++--------- .../action/search/SearchResponseTests.java | 50 +- .../elasticsearch/search/SearchHitTests.java | 141 +++-- .../elasticsearch/search/SearchHitsTests.java | 35 +- .../metrics/InternalTopHitsTests.java | 20 +- .../fetch/FetchPhaseDocsIteratorTests.java | 1 + .../fetch/subphase/FetchFieldsPhaseTests.java | 2 +- .../fetch/subphase/FetchSourcePhaseTests.java | 2 +- .../fetch/subphase/FieldFetcherTests.java | 2 +- .../CompletionSuggestionOptionTests.java | 3 + .../RemoteClusterConnectionTests.java | 4 +- .../search/fetch/HighlighterTestCase.java | 2 +- .../test/AbstractXContentTestCase.java | 2 +- .../xpack/search/AsyncSearchSecurityIT.java | 2 +- .../core/ml/action/GetJobsStatsAction.java | 2 +- .../action/EnrichShardMultiSearchAction.java | 36 +- .../eql/execution/search/RuntimeUtils.java | 3 +- .../assembler/ImplicitTiebreakerTests.java | 6 +- .../assembler/SequenceSpecTests.java | 6 +- .../execution/sample/CircuitBreakerTests.java | 7 +- .../execution/sample/SampleIteratorTests.java | 2 +- .../CriterionOrdinalExtractionTests.java | 2 +- .../search/PITAwareQueryClientTests.java | 6 +- .../sequence/CircuitBreakerTests.java | 20 +- .../inference/registry/ModelRegistry.java | 19 +- .../TransportGetPipelineActionTests.java | 8 +- .../integration/ModelSnapshotRetentionIT.java | 4 +- .../extractor/scroll/ScrollDataExtractor.java | 19 +- .../extractor/DataFrameDataExtractor.java | 22 +- .../dataframe/inference/TestDocsIterator.java | 2 +- .../persistence/TrainedModelProvider.java | 13 +- .../TransportDeleteForecastActionTests.java | 2 +- .../chunked/ChunkedDataExtractorTests.java | 3 +- .../scroll/ScrollDataExtractorTests.java | 3 +- .../DataFrameAnalyticsTaskTests.java | 10 +- .../DataFrameDataExtractorTests.java | 3 +- .../inference/InferenceRunnerTests.java | 12 +- .../process/DataFrameRowsJoinerTests.java | 2 +- .../persistence/JobResultsPersisterTests.java | 20 +- .../persistence/JobResultsProviderTests.java | 3 +- .../ml/job/persistence/MockClientBuilder.java | 6 +- .../job/persistence/StateStreamerTests.java | 3 +- .../AbstractExpiredJobDataRemoverTests.java | 6 +- .../process/IndexingStateProcessorTests.java | 2 +- .../xpack/ml/test/SearchHitBuilder.java | 2 +- .../BatchedDocumentsIteratorTests.java | 3 +- .../security/ScrollHelperIntegTests.java | 7 +- ...sportSamlInvalidateSessionActionTests.java | 43 +- .../security/authc/ApiKeyServiceTests.java | 8 +- .../security/authc/TokenServiceTests.java | 4 +- .../IndexServiceAccountTokenStoreTests.java | 4 +- .../mapper/NativeRoleMappingStoreTests.java | 11 +- .../store/NativePrivilegeStoreTests.java | 22 +- .../sql/execution/search/SearchHitCursor.java | 4 +- .../sql/execution/search/SearchHitRowSet.java | 14 +- .../extractor/ComputingExtractorTests.java | 2 +- .../extractor/FieldHitExtractorTests.java | 24 +- .../search/extractor/ScoreExtractorTests.java | 2 +- .../extractor/TopHitsAggExtractorTests.java | 4 +- .../SeqNoPrimaryTermAndIndexTests.java | 2 +- .../ClientTransformIndexerTests.java | 6 +- .../TransformIndexerFailureHandlingTests.java | 6 +- .../CompositeBucketsChangeCollectorTests.java | 3 +- .../DateHistogramFieldCollectorTests.java | 3 +- .../transforms/pivot/PivotTests.java | 46 +- .../CompareConditionSearchTests.java | 4 +- 
.../history/HistoryActionConditionTests.java | 4 +- .../xpack/watcher/WatcherServiceTests.java | 4 +- .../execution/TriggeredWatchStoreTests.java | 18 +- 115 files changed, 1498 insertions(+), 1133 deletions(-) create mode 100644 docs/changelog/103763.yaml diff --git a/docs/changelog/103763.yaml b/docs/changelog/103763.yaml new file mode 100644 index 0000000000000..e4d6556c77077 --- /dev/null +++ b/docs/changelog/103763.yaml @@ -0,0 +1,6 @@ +pr: 103763 +summary: Ref count search response bytes +area: Search +type: enhancement +issues: + - 102657 diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index c7dbee47ea823..cbb41dfa02c5f 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -318,7 +318,7 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) Map> requestMap = new HashMap<>(); for (int i = firstChunk; i <= lastChunk; i++) { byte[] chunk = data.get(i - firstChunk); - SearchHit hit = new SearchHit(i); + SearchHit hit = SearchHit.unpooled(i); try (XContentBuilder builder = XContentBuilder.builder(XContentType.SMILE.xContent())) { builder.map(Map.of("data", chunk)); builder.flush(); @@ -328,7 +328,7 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) throw new UncheckedIOException(ex); } - SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); + SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); SearchResponse searchResponse = new SearchResponse(hits, null, null, false, null, null, 0, null, 1, 1, 0, 1L, null, null); toRelease.add(searchResponse::decRef); @SuppressWarnings("unchecked") diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 4e3d9baaf5c92..138007c104d2b 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -97,7 +97,7 @@ public void process(HitContext hit) throws IOException { BytesReference document = percolateQuery.getDocuments().get(slot); leafStoredFields.advanceTo(slot); HitContext subContext = new HitContext( - new SearchHit(slot, "unknown"), + SearchHit.unpooled(slot, "unknown"), percolatorLeafReaderContext, slot, leafStoredFields.storedFields(), diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java index b65d966bd6551..82ec63b785e56 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorMatchedSlotSubFetchPhaseTests.java @@ -56,7 +56,7 @@ public void testHitsExecute() throws Exception { LeafReaderContext context = reader.leaves().get(0); // A match: { - HitContext hit = new HitContext(new SearchHit(0), context, 0, Map.of(), 
Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); @@ -87,7 +87,7 @@ public void testHitsExecute() throws Exception { // No match: { - HitContext hit = new HitContext(new SearchHit(0), context, 0, Map.of(), Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value")); MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value1", new WhitespaceAnalyzer()); @@ -117,7 +117,7 @@ public void testHitsExecute() throws Exception { // No query: { - HitContext hit = new HitContext(new SearchHit(0), context, 0, Map.of(), Source.empty(null)); + HitContext hit = new HitContext(SearchHit.unpooled(0), context, 0, Map.of(), Source.empty(null)); PercolateQuery.QueryStore queryStore = ctx -> docId -> null; MemoryIndex memoryIndex = new MemoryIndex(); memoryIndex.addField("field", "value", new WhitespaceAnalyzer()); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java index d58c15d4efd74..f57c02bcdcc22 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedSearchHit.java @@ -33,12 +33,12 @@ public class RatedSearchHit implements Writeable, ToXContentObject { private final OptionalInt rating; public RatedSearchHit(SearchHit searchHit, OptionalInt rating) { - this.searchHit = searchHit; + this.searchHit = searchHit.asUnpooled(); this.rating = rating; } RatedSearchHit(StreamInput in) throws IOException { - this(SearchHit.readFrom(in), in.readBoolean() ? OptionalInt.of(in.readVInt()) : OptionalInt.empty()); + this(SearchHit.readFrom(in, false), in.readBoolean() ? 
OptionalInt.of(in.readVInt()) : OptionalInt.empty()); } @Override diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java index 7dad062ab3bca..37de70ded462f 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/BulkByScrollUsesAllScrollDocumentsAfterConflictsIntegTests.java @@ -209,7 +209,7 @@ public void testDeleteByQuery() throws Exception { .addSort(SORTING_FIELD, SortOrder.DESC), response -> { // Modify a subset of the target documents concurrently - final List originalDocs = Arrays.asList(response.getHits().getHits()); + final List originalDocs = Arrays.asList(response.getHits().asUnpooled().getHits()); docsModifiedConcurrently.addAll(randomSubsetOf(finalConflictingOps, originalDocs)); } ); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index c3cf7cf62f925..c40a4f72bc133 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -567,8 +567,8 @@ protected RequestWrapper buildRequest(Hit doc) { action.start(); // create a simulated response. - SearchHit hit = new SearchHit(0, "id").sourceRef(new BytesArray("{}")); - SearchHits hits = new SearchHits( + SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}")); + SearchHits hits = SearchHits.unpooled( IntStream.range(0, 100).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 7ac50eb0e7c6c..44e69d3a4cda8 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -162,8 +162,8 @@ public void testScrollKeepAlive() { private SearchResponse createSearchResponse() { // create a simulated response. 
- SearchHit hit = new SearchHit(0, "id").sourceRef(new BytesArray("{}")); - SearchHits hits = new SearchHits( + SearchHit hit = SearchHit.unpooled(0, "id").sourceRef(new BytesArray("{}")); + SearchHits hits = SearchHits.unpooled( IntStream.range(0, randomIntBetween(0, 20)).mapToObj(i -> hit).toArray(SearchHit[]::new), new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java index 8f178397f508b..1fe128da6889c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/RandomScoreFunctionIT.java @@ -112,7 +112,7 @@ public void testConsistentHitsWithSameSeed() throws Exception { CoreMatchers.equalTo(0) ); final int hitCount = response.getHits().getHits().length; - final SearchHit[] currentHits = response.getHits().getHits(); + final SearchHit[] currentHits = response.getHits().asUnpooled().getHits(); ArrayUtil.timSort(currentHits, (o1, o2) -> { // for tie-breaking we have to resort here since if the score is // identical we rely on collection order which might change. diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 591b9a86cda20..1da114adb34f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -172,7 +172,7 @@ abstract class AbstractSearchAsyncAction exten this.results = resultConsumer; // register the release of the query consumer to free up the circuit breaker memory // at the end of the search - addReleasable(resultConsumer::decRef); + addReleasable(resultConsumer); this.clusters = clusters; } diff --git a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java index b4fd0107f731f..96f10d7d8a30e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/ArraySearchPhaseResults.java @@ -9,11 +9,11 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.AbstractRefCounted; -import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.transport.LeakTracker; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Stream; /** @@ -22,7 +22,13 @@ class ArraySearchPhaseResults extends SearchPhaseResults { final AtomicArray results; - private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(this::doClose)); + private final AtomicBoolean closed = new AtomicBoolean(false); + + private final Releasable releasable = LeakTracker.wrap(() -> { + for (Result result : getAtomicArray().asList()) { + result.decRef(); + } + }); ArraySearchPhaseResults(int size) { super(size); @@ -41,12 +47,16 @@ void consumeResult(Result result, Runnable next) { next.run(); } - protected void doClose() { - for (Result result : getAtomicArray().asList()) { - result.decRef(); + @Override + public 
final void close() { + if (closed.compareAndSet(false, true)) { + releasable.close(); + doClose(); } } + protected void doClose() {} + boolean hasResult(int shardIndex) { return results.get(shardIndex) != null; } @@ -55,24 +65,4 @@ boolean hasResult(int shardIndex) { AtomicArray getAtomicArray() { return results; } - - @Override - public void incRef() { - refCounted.incRef(); - } - - @Override - public boolean tryIncRef() { - return refCounted.tryIncRef(); - } - - @Override - public boolean decRef() { - return refCounted.decRef(); - } - - @Override - public boolean hasReferences() { - return refCounted.hasReferences(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index 9900ee9d824ae..52f41179795d6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -482,24 +482,7 @@ Stream getSuccessfulResults() { } @Override - public void incRef() { - - } - - @Override - public boolean tryIncRef() { - return false; - } - - @Override - public boolean decRef() { - return false; - } - - @Override - public boolean hasReferences() { - return false; - } + public void close() {} } private GroupShardsIterator getIterator( diff --git a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java index 13972ea2bf64a..2c4cb31584323 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumer.java @@ -104,20 +104,5 @@ AtomicArray getAtomicArray() { } @Override - public void incRef() {} - - @Override - public boolean tryIncRef() { - return true; - } - - @Override - public boolean decRef() { - return true; - } - - @Override - public boolean hasReferences() { - return false; - } + public void close() {} } diff --git a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java index 3a12b72570caf..0e6830dcfab0e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java +++ b/server/src/main/java/org/elasticsearch/action/search/CountedCollector.java @@ -25,7 +25,6 @@ final class CountedCollector { CountedCollector(SearchPhaseResults resultConsumer, int expectedOps, Runnable onFinish, SearchPhaseContext context) { this.resultConsumer = resultConsumer; - resultConsumer.incRef(); this.counter = new CountDown(expectedOps); this.onFinish = onFinish; this.context = context; @@ -38,11 +37,7 @@ final class CountedCollector { void countDown() { assert counter.isCountedDown() == false : "more operations executed than specified"; if (counter.countDown()) { - try { - onFinish.run(); - } finally { - resultConsumer.decRef(); - } + onFinish.run(); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 54408cd560314..77f1931f62537 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -66,7 +66,7 @@ final class DfsQueryPhase extends SearchPhase { // 
register the release of the query consumer to free up the circuit breaker memory // at the end of the search - context.addReleasable(queryResult::decRef); + context.addReleasable(queryResult); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 00e2b41fde3da..7741c1483f69a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -96,6 +96,7 @@ public void run() { hit.setInnerHits(Maps.newMapWithExpectedSize(innerHitBuilders.size())); } hit.getInnerHits().put(innerHitBuilder.getName(), innerHits); + innerHits.mustIncRef(); } } onPhaseDone(); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 11528f8e1521f..1f06158951392 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -38,11 +38,16 @@ final class FetchSearchPhase extends SearchPhase { private final AggregatedDfs aggregatedDfs; FetchSearchPhase(SearchPhaseResults resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context) { - this(resultConsumer, aggregatedDfs, context, (response, queryPhaseResults) -> { - response.mustIncRef(); - context.addReleasable(response::decRef); - return new ExpandSearchPhase(context, response.hits, () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults)); - }); + this( + resultConsumer, + aggregatedDfs, + context, + (response, queryPhaseResults) -> new ExpandSearchPhase( + context, + response.hits, + () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults) + ) + ); } FetchSearchPhase( @@ -61,7 +66,7 @@ final class FetchSearchPhase extends SearchPhase { ); } this.fetchResults = new ArraySearchPhaseResults<>(resultConsumer.getNumShards()); - context.addReleasable(fetchResults::decRef); + context.addReleasable(fetchResults); this.queryResults = resultConsumer.getAtomicArray(); this.aggregatedDfs = aggregatedDfs; this.nextPhaseFactory = nextPhaseFactory; @@ -225,10 +230,8 @@ private void moveToNextPhase( AtomicArray fetchResultsArr ) { var resp = SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr); - try { - context.executeNextPhase(this, nextPhaseFactory.apply(resp, queryResults)); - } finally { - resp.decRef(); - } + context.addReleasable(resp::decRef); + fetchResults.close(); + context.executeNextPhase(this, nextPhaseFactory.apply(resp, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index b7b113601560b..34ee0fc146aa5 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -105,11 +105,7 @@ public QueryPhaseResultConsumer( @Override protected void doClose() { - try { - super.doClose(); - } finally { - pendingMerges.close(); - } + pendingMerges.close(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java 
index 6fcfc97c33c9e..fcc848384866a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -68,7 +68,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction clusters ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; - addReleasable(queryPhaseResultConsumer::decRef); + addReleasable(queryPhaseResultConsumer); this.progressListener = task.getProgressListener(); // don't build the SearchShard list (can be expensive) if the SearchProgressListener won't use it if (progressListener != SearchProgressListener.NOOP) { @@ -95,7 +95,6 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res final List dfsSearchResults = results.getAtomicArray().asList(); final AggregatedDfs aggregatedDfs = SearchPhaseController.aggregateDfs(dfsSearchResults); final List mergedKnnResults = SearchPhaseController.mergeKnnResults(getRequest(), dfsSearchResults); - queryPhaseResultConsumer.incRef(); return new DfsQueryPhase( dfsSearchResults, aggregatedDfs, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 5ffb9024d3ee1..6cfea93068a86 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -364,11 +364,15 @@ public static SearchResponseSections merge( } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; var fetchResults = fetchResultsArray.asList(); - SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); - if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { - mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + final SearchHits hits = getHits(reducedQueryPhase, ignoreFrom, fetchResultsArray); + try { + if (reducedQueryPhase.suggest != null && fetchResults.isEmpty() == false) { + mergeSuggest(reducedQueryPhase, fetchResultsArray, hits, sortedDocs); + } + return reducedQueryPhase.buildResponse(hits, fetchResults); + } finally { + hits.decRef(); } - return reducedQueryPhase.buildResponse(hits, fetchResults); } private static void mergeSuggest( @@ -462,6 +466,7 @@ private static SearchHits getHits( searchHit.score(shardDoc.score); } hits.add(searchHit); + searchHit.incRef(); } } return new SearchHits( diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java index 11b8e0a0792a3..28606ecc09f90 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseResults.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import java.util.stream.Stream; @@ -17,7 +17,7 @@ /** * This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing */ -abstract class SearchPhaseResults implements RefCounted { +abstract class SearchPhaseResults implements Releasable { private final int numShards; SearchPhaseResults(int numShards) { diff --git 
a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index a9943d7b43397..84b9dc745ed92 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,7 +20,9 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestActions; @@ -30,6 +32,7 @@ import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; @@ -81,9 +84,16 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private final Clusters clusters; private final long tookInMillis; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + hits.decRef(); + } + }); + public SearchResponse(StreamInput in) throws IOException { super(in); - this.hits = new SearchHits(in); + this.hits = SearchHits.readFrom(in, true); this.aggregations = in.readBoolean() ? InternalAggregations.readFrom(in) : null; this.suggest = in.readBoolean() ? new Suggest(in) : null; this.timedOut = in.readBoolean(); @@ -191,6 +201,7 @@ public SearchResponse( String pointInTimeId ) { this.hits = hits; + hits.incRef(); this.aggregations = aggregations; this.suggest = suggest; this.profileResults = profileResults; @@ -210,6 +221,26 @@ public SearchResponse( : "SearchResponse can't have both scrollId [" + scrollId + "] and searchContextId [" + pointInTimeId + "]"; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + public RestStatus status() { return RestStatus.status(successfulShards, totalShards, shardFailures); } @@ -218,6 +249,7 @@ public RestStatus status() { * The search hits. 
*/ public SearchHits getHits() { + assert hasReferences(); return hits; } @@ -344,6 +376,7 @@ public Clusters getClusters() { @Override public Iterator toXContentChunked(ToXContent.Params params) { + assert hasReferences(); return Iterators.concat( ChunkedToXContentHelper.startObject(), this.innerToXContentChunked(params), @@ -493,6 +526,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } } + return new SearchResponse( hits, aggs, @@ -514,6 +548,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); hits.writeTo(out); out.writeOptionalWriteable((InternalAggregations) aggregations); out.writeOptionalWriteable(suggest); @@ -537,7 +572,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return Strings.toString(this); + return hasReferences() == false ? "SearchResponse[released]" : Strings.toString(this); } /** diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index 0586cbb9046dc..9db9d65bc3dac 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -204,33 +204,37 @@ public SearchResponse getMergedResponse(Clusters clusters) { setTopDocsShardIndex(shards, topDocsList); TopDocs topDocs = mergeTopDocs(topDocsList, size, from); SearchHits mergedSearchHits = topDocsToSearchHits(topDocs, topDocsStats); - setSuggestShardIndex(shards, groupedSuggestions); - Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); - InternalAggregations reducedAggs = aggs.isEmpty() - ? InternalAggregations.EMPTY - : InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction()); - ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); - SearchProfileResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileResults(profileResults); - // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from - Arrays.sort(shardFailures, FAILURES_COMPARATOR); - long tookInMillis = searchTimeProvider.buildTookInMillis(); - return new SearchResponse( - mergedSearchHits, - reducedAggs, - suggest, - topDocsStats.timedOut, - topDocsStats.terminatedEarly, - profileShardResults, - numReducePhases, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardFailures, - clusters, - null - ); + try { + setSuggestShardIndex(shards, groupedSuggestions); + Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions)); + InternalAggregations reducedAggs = aggs.isEmpty() + ? InternalAggregations.EMPTY + : InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction()); + ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY); + SearchProfileResults profileShardResults = profileResults.isEmpty() ? 
null : new SearchProfileResults(profileResults); + // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from + Arrays.sort(shardFailures, FAILURES_COMPARATOR); + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse( + mergedSearchHits, + reducedAggs, + suggest, + topDocsStats.timedOut, + topDocsStats.terminatedEarly, + profileShardResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + null + ); + } finally { + mergedSearchHits.decRef(); + } } private static final Comparator FAILURES_COMPARATOR = new Comparator() { @@ -376,6 +380,7 @@ private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topD for (int i = 0; i < topDocs.scoreDocs.length; i++) { FieldDocAndSearchHit scoreDoc = (FieldDocAndSearchHit) topDocs.scoreDocs[i]; searchHits[i] = scoreDoc.searchHit; + scoreDoc.searchHit.mustIncRef(); } } SortField[] sortFields = null; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index 805ef033db27a..d52a585b3e792 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -64,6 +64,7 @@ public SearchResponseSections( int numReducePhases ) { this.hits = hits; + hits.incRef(); this.aggregations = aggregations; this.suggest = suggest; this.profileResults = profileResults; @@ -73,7 +74,7 @@ public SearchResponseSections( refCounted = hits.getHits().length > 0 ? LeakTracker.wrap(new AbstractRefCounted() { @Override protected void closeInternal() { - // TODO: noop until hits are ref counted + hits.decRef(); } }) : ALWAYS_REFERENCED; } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index 905373f9400f6..8973ae6e9dd3a 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -32,7 +32,6 @@ public final class ReleasableBytesReference implements RefCounted, Releasable, B private final RefCounted refCounted; public static ReleasableBytesReference empty() { - EMPTY.incRef(); return EMPTY; } @@ -147,6 +146,9 @@ public StreamInput streamInput() throws IOException { assert hasReferences(); return new BytesReferenceStreamInput(this) { private ReleasableBytesReference retainAndSkip(int len) throws IOException { + if (len == 0) { + return ReleasableBytesReference.empty(); + } // instead of reading the bytes from a stream we just create a slice of the underlying bytes final ReleasableBytesReference result = retainedSlice(offset(), len); // move the stream manually since creating the slice didn't move it diff --git a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java index 5db1732fc1590..42cf8a185ec7a 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/ClientScrollableHitSource.java @@ -157,8 +157,8 @@ private static class ClientHit implements Hit { private final BytesReference source; ClientHit(SearchHit 
delegate) { - this.delegate = delegate; - source = delegate.hasSource() ? delegate.getSourceRef() : null; + this.delegate = delegate.asUnpooled(); // TODO: use pooled version here + source = this.delegate.hasSource() ? this.delegate.getSourceRef() : null; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index d2fc20ab83269..dde044bf15115 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -13,6 +13,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.document.DocumentField; @@ -24,7 +25,9 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.mapper.IgnoredFieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -35,6 +38,7 @@ import org.elasticsearch.search.fetch.subphase.LookupField; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.lookup.Source; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -55,6 +59,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Collectors; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -70,7 +75,7 @@ * * @see SearchHits */ -public final class SearchHit implements Writeable, ToXContentObject { +public final class SearchHit implements Writeable, ToXContentObject, RefCounted { private final transient int docId; @@ -114,6 +119,8 @@ public final class SearchHit implements Writeable, ToXContentObject { private Map innerHits; + private final RefCounted refCounted; + // used only in tests public SearchHit(int docId) { this(docId, null); @@ -124,6 +131,10 @@ public SearchHit(int docId, String id) { } public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { + this(nestedTopDocId, id, nestedIdentity, null); + } + + private SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity, @Nullable RefCounted refCounted) { this( nestedTopDocId, DEFAULT_SCORE, @@ -142,8 +153,10 @@ public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { null, null, null, + null, new HashMap<>(), - new HashMap<>() + new HashMap<>(), + refCounted ); } @@ -164,9 +177,11 @@ public SearchHit( SearchShardTarget shard, String index, String clusterAlias, + Map sourceAsMap, Map innerHits, Map documentFields, - Map metaFields + Map metaFields, + @Nullable RefCounted refCounted ) { this.docId = docId; this.score = score; @@ -184,12 +199,28 @@ public SearchHit( this.shard = shard; this.index = index; this.clusterAlias = clusterAlias; + this.sourceAsMap = sourceAsMap; this.innerHits = innerHits; 
this.documentFields = documentFields; this.metaFields = metaFields; + this.refCounted = refCounted == null ? LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + if (SearchHit.this.innerHits != null) { + for (SearchHits h : SearchHit.this.innerHits.values()) { + h.decRef(); + } + SearchHit.this.innerHits = null; + } + if (SearchHit.this.source instanceof RefCounted r) { + r.decRef(); + } + SearchHit.this.source = null; + } + }) : ALWAYS_REFERENCED; } - public static SearchHit readFrom(StreamInput in) throws IOException { + public static SearchHit readFrom(StreamInput in, boolean pooled) throws IOException { final float score = in.readFloat(); final int rank; if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { @@ -205,7 +236,7 @@ public static SearchHit readFrom(StreamInput in) throws IOException { final long version = in.readLong(); final long seqNo = in.readZLong(); final long primaryTerm = in.readVLong(); - BytesReference source = in.readBytesReference(); + BytesReference source = pooled ? in.readReleasableBytesReference() : in.readBytesReference(); if (source.length() == 0) { source = null; } @@ -244,7 +275,7 @@ public static SearchHit readFrom(StreamInput in) throws IOException { if (size > 0) { innerHits = Maps.newMapWithExpectedSize(size); for (int i = 0; i < size; i++) { - innerHits.put(in.readString(), new SearchHits(in)); + innerHits.put(in.readString(), SearchHits.readFrom(in, pooled)); } } else { innerHits = null; @@ -266,16 +297,31 @@ public static SearchHit readFrom(StreamInput in) throws IOException { shardTarget, index, clusterAlias, + null, innerHits, documentFields, - metaFields + metaFields, + pooled ? null : ALWAYS_REFERENCED ); } + public static SearchHit unpooled(int docId) { + return unpooled(docId, null); + } + + public static SearchHit unpooled(int docId, String id) { + return unpooled(docId, id, null); + } + + public static SearchHit unpooled(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { + return new SearchHit(nestedTopDocId, id, nestedIdentity, ALWAYS_REFERENCED); + } + private static final Text SINGLE_MAPPING_TYPE = new Text(MapperService.SINGLE_MAPPING_NAME); @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); out.writeFloat(score); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeVInt(rank); @@ -401,6 +447,7 @@ public NestedIdentity getNestedIdentity() { * Returns bytes reference, also uncompress the source if needed. */ public BytesReference getSourceRef() { + assert hasReferences(); if (this.source == null) { return null; } @@ -427,6 +474,7 @@ public SearchHit sourceRef(BytesReference source) { * {@code _source} or if source is disabled in the mapping. */ public boolean hasSource() { + assert hasReferences(); return source != null; } @@ -434,6 +482,7 @@ public boolean hasSource() { * The source of the document as string (can be {@code null}). */ public String getSourceAsString() { + assert hasReferences(); if (source == null) { return null; } @@ -448,6 +497,7 @@ public String getSourceAsString() { * The source of the document as a map (can be {@code null}). */ public Map getSourceAsMap() { + assert hasReferences(); if (source == null) { return null; } @@ -463,6 +513,7 @@ public Map getSourceAsMap() { * The hit field matching the given field name. 
*/ public DocumentField field(String fieldName) { + assert hasReferences(); DocumentField result = documentFields.get(fieldName); if (result != null) { return result; @@ -653,13 +704,72 @@ public Map getMatchedQueriesAndScores() { * @return Inner hits or null if there are none */ public Map getInnerHits() { + assert hasReferences(); return innerHits; } public void setInnerHits(Map innerHits) { + assert innerHits == null || innerHits.values().stream().noneMatch(h -> h.hasReferences() == false); + assert this.innerHits == null; this.innerHits = innerHits; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + public SearchHit asUnpooled() { + assert hasReferences(); + if (isPooled() == false) { + return this; + } + return new SearchHit( + docId, + score, + rank, + id, + nestedIdentity, + version, + seqNo, + primaryTerm, + source instanceof RefCounted ? new BytesArray(source.toBytesRef(), true) : source, + highlightFields, + sortValues, + matchedQueries, + explanation, + shard, + index, + clusterAlias, + sourceAsMap, + innerHits == null + ? null + : innerHits.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().asUnpooled())), + documentFields, + metaFields, + ALWAYS_REFERENCED + ); + } + + public boolean isPooled() { + return refCounted != ALWAYS_REFERENCED; + } + public static class Fields { static final String _INDEX = "_index"; static final String _ID = "_id"; @@ -690,6 +800,7 @@ public static class Fields { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + assert hasReferences(); builder.startObject(); toInnerXContent(builder, params); builder.endObject(); @@ -972,9 +1083,11 @@ public static SearchHit createFromMap(Map values) { shardTarget, index, clusterAlias, + null, get(Fields.INNER_HITS, values, null), get(DOCUMENT_FIELDS, values, Collections.emptyMap()), - get(METADATA_FIELDS, values, Collections.emptyMap()) + get(METADATA_FIELDS, values, Collections.emptyMap()), + ALWAYS_REFERENCED // TODO: do we ever want pooling here? 
); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index c689f928954d2..a5c9425ba754c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -18,8 +18,11 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -32,7 +35,7 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; -public final class SearchHits implements Writeable, ChunkedToXContent, Iterable { +public final class SearchHits implements Writeable, ChunkedToXContent, RefCounted, Iterable { public static final SearchHit[] EMPTY = new SearchHit[0]; public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(new TotalHits(0, Relation.EQUAL_TO), 0); @@ -48,6 +51,8 @@ public final class SearchHits implements Writeable, ChunkedToXContent, Iterable< @Nullable private final Object[] collapseValues; + private final RefCounted refCounted; + public static SearchHits empty(@Nullable TotalHits totalHits, float maxScore) { return new SearchHits(EMPTY, totalHits, maxScore); } @@ -63,6 +68,35 @@ public SearchHits( @Nullable SortField[] sortFields, @Nullable String collapseField, @Nullable Object[] collapseValues + ) { + this( + hits, + totalHits, + maxScore, + sortFields, + collapseField, + collapseValues, + hits.length == 0 ? 
ALWAYS_REFERENCED : LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + for (int i = 0; i < hits.length; i++) { + assert hits[i] != null; + hits[i].decRef(); + hits[i] = null; + } + } + }) + ); + } + + private SearchHits( + SearchHit[] hits, + @Nullable TotalHits totalHits, + float maxScore, + @Nullable SortField[] sortFields, + @Nullable String collapseField, + @Nullable Object[] collapseValues, + RefCounted refCounted ) { this.hits = hits; this.totalHits = totalHits; @@ -70,32 +104,64 @@ public SearchHits( this.sortFields = sortFields; this.collapseField = collapseField; this.collapseValues = collapseValues; + this.refCounted = refCounted; } - public SearchHits(StreamInput in) throws IOException { + public static SearchHits unpooled(SearchHit[] hits, @Nullable TotalHits totalHits, float maxScore) { + return unpooled(hits, totalHits, maxScore, null, null, null); + } + + public static SearchHits unpooled( + SearchHit[] hits, + @Nullable TotalHits totalHits, + float maxScore, + @Nullable SortField[] sortFields, + @Nullable String collapseField, + @Nullable Object[] collapseValues + ) { + assert assertUnpooled(hits); + return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues, ALWAYS_REFERENCED); + } + + private static boolean assertUnpooled(SearchHit[] searchHits) { + for (SearchHit searchHit : searchHits) { + assert searchHit.isPooled() == false : "hit was pooled [" + searchHit + "]"; + } + return true; + } + + public static SearchHits readFrom(StreamInput in, boolean pooled) throws IOException { + final TotalHits totalHits; if (in.readBoolean()) { totalHits = Lucene.readTotalHits(in); } else { // track_total_hits is false totalHits = null; } - maxScore = in.readFloat(); + final float maxScore = in.readFloat(); int size = in.readVInt(); + final SearchHit[] hits; if (size == 0) { hits = EMPTY; } else { hits = new SearchHit[size]; for (int i = 0; i < hits.length; i++) { - hits[i] = SearchHit.readFrom(in); + hits[i] = SearchHit.readFrom(in, pooled); } } - sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); - collapseField = in.readOptionalString(); - collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); + var sortFields = in.readOptionalArray(Lucene::readSortField, SortField[]::new); + var collapseField = in.readOptionalString(); + var collapseValues = in.readOptionalArray(Lucene::readSortValue, Object[]::new); + if (pooled) { + return new SearchHits(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); + } else { + return unpooled(hits, totalHits, maxScore, sortFields, collapseField, collapseValues); + } } @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); final boolean hasTotalHits = totalHits != null; out.writeBoolean(hasTotalHits); if (hasTotalHits) { @@ -128,6 +194,7 @@ public float getMaxScore() { * The hits of the search request (based on the search type, and from / size provided). */ public SearchHit[] getHits() { + assert hasReferences(); return this.hits; } @@ -135,6 +202,7 @@ public SearchHit[] getHits() { * Return the hit as the provided position. 
*/ public SearchHit getAt(int position) { + assert hasReferences(); return hits[position]; } @@ -165,9 +233,42 @@ public Object[] getCollapseValues() { @Override public Iterator iterator() { + assert hasReferences(); return Iterators.forArray(getHits()); } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + public SearchHits asUnpooled() { + assert hasReferences(); + if (refCounted == ALWAYS_REFERENCED) { + return this; + } + final SearchHit[] unpooledHits = new SearchHit[hits.length]; + for (int i = 0; i < hits.length; i++) { + unpooledHits[i] = hits[i].asUnpooled(); + } + return unpooled(unpooledHits, totalHits, maxScore, sortFields, collapseField, collapseValues); + } + public static final class Fields { public static final String HITS = "hits"; public static final String TOTAL = "total"; @@ -176,6 +277,7 @@ public static final class Fields { @Override public Iterator toXContentChunked(ToXContent.Params params) { + assert hasReferences(); return Iterators.concat(Iterators.single((b, p) -> b.startObject(Fields.HITS)), Iterators.single((b, p) -> { boolean totalHitAsInt = params.paramAsBoolean(RestSearchAction.TOTAL_HITS_AS_INT_PARAM, false); if (totalHitAsInt) { @@ -239,7 +341,7 @@ public static SearchHits fromXContent(XContentParser parser) throws IOException } } } - return new SearchHits(hits.toArray(SearchHits.EMPTY), totalHits, maxScore); + return SearchHits.unpooled(hits.toArray(SearchHits.EMPTY), totalHits, maxScore); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java index 77cb482edd8b4..fd637e14581ca 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalTopHits.java @@ -52,7 +52,7 @@ public InternalTopHits( this.from = from; this.size = size; this.topDocs = topDocs; - this.searchHits = searchHits; + this.searchHits = searchHits.asUnpooled(); } /** @@ -63,7 +63,7 @@ public InternalTopHits(StreamInput in) throws IOException { from = in.readVInt(); size = in.readVInt(); topDocs = Lucene.readTopDocs(in); - searchHits = new SearchHits(in); + searchHits = SearchHits.readFrom(in, false); } @Override @@ -152,8 +152,9 @@ private static SearchHits extractSearchHits( position = tracker[shardIndex]++; } while (topDocsForShard.scoreDocs[position] != scoreDoc); hits[i] = ((InternalTopHits) aggregations.get(shardIndex)).searchHits.getAt(position); + assert hits[i].isPooled() == false; } - return new SearchHits(hits, reducedTopDocs.totalHits, maxScore); + return SearchHits.unpooled(hits, reducedTopDocs.totalHits, maxScore); } private static float reduceAndFindMaxScore(List aggregations, TopDocs[] shardDocs) { diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 91e4fb791f62d..c106d9b6f4cb2 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -82,6 +82,7 @@ public void execute(SearchContext context, int[] docIdsToLoad) { // Only set the shardResults if 
building search hits was successful if (hits != null) { context.fetchResult().shardResult(hits, profileResult); + hits.decRef(); } } } @@ -173,7 +174,7 @@ protected SearchHit nextDoc(int doc) throws IOException { } TotalHits totalHits = context.getTotalHits(); - return new SearchHits(hits, totalHits, context.getMaxScore()); + return SearchHits.unpooled(hits, totalHits, context.getMaxScore()); } List getProcessors(SearchShardTarget target, FetchContext context, Profiler profiler) { @@ -247,11 +248,12 @@ private static HitContext prepareNonNestedHitContext( String id = idLoader.getId(subDocId); if (id == null) { - SearchHit hit = new SearchHit(docId, null); + // TODO: can we use pooled buffers here as well? + SearchHit hit = SearchHit.unpooled(docId, null); Source source = Source.lazy(lazyStoredSourceLoader(profiler, subReaderContext, subDocId)); return new HitContext(hit, subReaderContext, subDocId, Map.of(), source); } else { - SearchHit hit = new SearchHit(docId, id); + SearchHit hit = SearchHit.unpooled(docId, id); Source source; if (requiresSource) { Timer timer = profiler.startLoadingSource(); @@ -328,7 +330,7 @@ private static HitContext prepareNestedHitContext( assert nestedIdentity != null; Source nestedSource = nestedIdentity.extractSource(rootSource); - SearchHit hit = new SearchHit(topDocId, rootId, nestedIdentity); + SearchHit hit = SearchHit.unpooled(topDocId, rootId, nestedIdentity); return new HitContext(hit, subReaderContext, nestedInfo.doc(), childFieldLoader.storedFields(), nestedSource); } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java index ea5ab13c2e8ee..cc39113f2009f 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchPhaseDocsIterator.java @@ -70,6 +70,11 @@ public final SearchHit[] iterate(SearchShardTarget shardTarget, IndexReader inde searchHits[docs[i].index] = nextDoc(docs[i].docId); } } catch (Exception e) { + for (SearchHit searchHit : searchHits) { + if (searchHit != null) { + searchHit.decRef(); + } + } throw new FetchPhaseExecutionException(shardTarget, "Error running fetch phase for doc [" + currentDoc + "]", e); } return searchHits; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java index aa5c1f2cbd992..6cf924a239208 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java @@ -30,7 +30,12 @@ public final class FetchSearchResult extends SearchPhaseResult { private ProfileResult profileResult; - private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> hits = null)); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> { + if (hits != null) { + hits.decRef(); + hits = null; + } + })); public FetchSearchResult() {} @@ -42,12 +47,13 @@ public FetchSearchResult(ShardSearchContextId id, SearchShardTarget shardTarget) public FetchSearchResult(StreamInput in) throws IOException { super(in); contextId = new ShardSearchContextId(in); - hits = new SearchHits(in); + hits = SearchHits.readFrom(in, true); profileResult = in.readOptionalWriteable(ProfileResult::new); } @Override public void writeTo(StreamOutput out) throws IOException { + assert hasReferences(); 
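// --- Editorial sketch, not part of the patch -----------------------------------
// The FetchPhaseDocsIterator hunk above adds the standard cleanup for building an
// array of ref-counted values: if construction fails partway, release the elements
// already built before rethrowing so nothing leaks. The generic shape (the builder
// and release hook are placeholders):
import java.util.function.Consumer;
import java.util.function.IntFunction;

final class PartialCleanup {
    static <T> T[] buildAll(T[] out, IntFunction<T> build, Consumer<T> release) {
        try {
            for (int i = 0; i < out.length; i++) {
                out[i] = build.apply(i);   // may throw halfway through
            }
            return out;
        } catch (RuntimeException e) {
            for (T built : out) {
                if (built != null) {
                    release.accept(built); // undo the partial work
                }
            }
            throw e;                        // then propagate the original failure
        }
    }
}
// --------------------------------------------------------------------------------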
contextId.writeTo(out); hits.writeTo(out); out.writeOptionalWriteable(profileResult); @@ -61,6 +67,7 @@ public FetchSearchResult fetchResult() { public void shardResult(SearchHits hits, ProfileResult profileResult) { assert assertNoSearchTarget(hits); this.hits = hits; + hits.incRef(); assert this.profileResult == null; this.profileResult = profileResult; } @@ -73,6 +80,7 @@ private static boolean assertNoSearchTarget(SearchHits hits) { } public SearchHits hits() { + assert hasReferences(); return hits; } diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java index feb0547a32536..ccb54801472a6 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsPhase.java @@ -103,7 +103,9 @@ private void hitExecute(Map innerHi searchHitFields.sortValues(fieldDoc.fields, innerHitsContext.sort().formats); } } - results.put(entry.getKey(), fetchResult.hits()); + var h = fetchResult.hits(); + results.put(entry.getKey(), h); + h.mustIncRef(); } } } diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java index 40ff9c6eaf6ee..7210c35d961ac 100644 --- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java +++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestion.java @@ -271,7 +271,9 @@ public Option(int docID, Text text, float score, Map> contex public Option(StreamInput in) throws IOException { super(in); this.doc = Lucene.readScoreDoc(in); - this.hit = in.readOptionalWriteable(SearchHit::readFrom); + if (in.readBoolean()) { + this.hit = SearchHit.readFrom(in, false); + } int contextSize = in.readInt(); this.contexts = Maps.newLinkedHashMapWithExpectedSize(contextSize); for (int i = 0; i < contextSize; i++) { @@ -309,7 +311,7 @@ public void setShardIndex(int shardIndex) { } public void setHit(SearchHit hit) { - this.hit = hit; + this.hit = hit == null ? 
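// --- Editorial sketch, not part of the patch -----------------------------------
// The FetchSearchResult and InnerHitsPhase hunks above follow one ownership
// contract: whoever stores a ref-counted value takes its own reference (incRef on
// store), and that holder's release hook gives exactly that reference back. In
// miniature, with Resource as a placeholder type:
final class StoringOwner implements AutoCloseable {
    static final class Resource {
        int refs = 1;
        void incRef() { refs++; }
        void decRef() { assert refs > 0 : "double release"; refs--; }
    }

    private Resource held;

    // Take our own reference; the caller keeps, and later releases, its own.
    void store(Resource r) {
        r.incRef();
        this.held = r;
    }

    @Override
    public void close() {
        if (held != null) {
            held.decRef();   // release exactly the reference store() took
            held = null;
        }
    }
}
// --------------------------------------------------------------------------------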
null : hit.asUnpooled(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 0288a5b92c772..607d83d4aab31 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -134,8 +134,7 @@ public void testTookWithRealClock() { private void runTestTook(final boolean controlled) { final AtomicLong expected = new AtomicLong(); - var result = new ArraySearchPhaseResults<>(10); - try { + try (var result = new ArraySearchPhaseResults<>(10)) { AbstractSearchAsyncAction action = createAction(new SearchRequest(), result, null, controlled, expected); final long actual = action.buildTookInMillis(); if (controlled) { @@ -145,16 +144,13 @@ private void runTestTook(final boolean controlled) { // with a real clock, the best we can say is that it took as long as we spun for assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get()))); } - } finally { - result.decRef(); } } public void testBuildShardSearchTransportRequest() { SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(randomBoolean()); final AtomicLong expected = new AtomicLong(); - var result = new ArraySearchPhaseResults<>(10); - try { + try (var result = new ArraySearchPhaseResults<>(10)) { AbstractSearchAsyncAction action = createAction(searchRequest, result, null, false, expected); String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10); SearchShardIterator iterator = new SearchShardIterator( @@ -170,8 +166,6 @@ public void testBuildShardSearchTransportRequest() { assertEquals(2.0f, shardSearchTransportRequest.indexBoost(), 0.0f); assertArrayEquals(new String[] { "name", "name1" }, shardSearchTransportRequest.indices()); assertEquals(clusterAlias, shardSearchTransportRequest.getClusterAlias()); - } finally { - result.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java index 33e6096bab763..4a7d0cc8208e2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountOnlyQueryPhaseResultConsumerTests.java @@ -41,8 +41,9 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { ); searchProgressListener.notifyListShards(searchShards, Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, timeProvider); - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(searchProgressListener, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(searchProgressListener, 10) + ) { AtomicInteger nextCounter = new AtomicInteger(0); for (int i = 0; i < 10; i++) { SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); @@ -58,14 +59,16 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { queryPhaseResultConsumer.reduce(); assertEquals(1, searchProgressListener.onFinalReduce.get()); assertEquals(10, nextCounter.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } public void testNullShardResultHandling() 
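// --- Editorial sketch, not part of the patch -----------------------------------
// The CompletionSuggestion.Option hunk above inlines readOptionalWriteable because
// the reader now takes an extra pooled flag: an optional value on the wire is just
// a boolean presence marker followed by the payload. A JDK-only equivalent, with
// Hit as a hypothetical payload type:
import java.io.DataInput;
import java.io.IOException;

final class OptionalWire {
    record Hit(int docId) {
        static Hit readFrom(DataInput in, boolean pooled) throws IOException {
            // pooled == false would route to an unpooled factory in the real code
            return new Hit(in.readInt());
        }
    }

    static Hit readOptionalHit(DataInput in) throws IOException {
        return in.readBoolean() ? Hit.readFrom(in, false) : null;
    }
}
// --------------------------------------------------------------------------------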
throws Exception { - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer( + SearchProgressListener.NOOP, + 10 + ) + ) { AtomicInteger nextCounter = new AtomicInteger(0); for (int i = 0; i < 10; i++) { SearchShardTarget searchShardTarget = new SearchShardTarget("node", new ShardId("index", "uuid", i), null); @@ -79,20 +82,20 @@ public void testNullShardResultHandling() throws Exception { assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); assertFalse(reducePhase.isEmptyResult()); assertEquals(10, nextCounter.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } public void testEmptyResults() throws Exception { - CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer(SearchProgressListener.NOOP, 10); - try { + try ( + CountOnlyQueryPhaseResultConsumer queryPhaseResultConsumer = new CountOnlyQueryPhaseResultConsumer( + SearchProgressListener.NOOP, + 10 + ) + ) { var reducePhase = queryPhaseResultConsumer.reduce(); assertEquals(0, reducePhase.totalHits().value); assertEquals(TotalHits.Relation.EQUAL_TO, reducePhase.totalHits().relation); assertTrue(reducePhase.isEmptyResult()); - } finally { - queryPhaseResultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 838e13d6026c7..bc31f5f92f9b5 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -23,8 +23,7 @@ public class CountedCollectorTests extends ESTestCase { public void testCollect() throws InterruptedException { - ArraySearchPhaseResults consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100)); - try { + try (ArraySearchPhaseResults consumer = new ArraySearchPhaseResults<>(randomIntBetween(1, 100))) { List state = new ArrayList<>(); int numResultsExpected = randomIntBetween(1, consumer.getAtomicArray().length()); MockSearchPhaseContext context = new MockSearchPhaseContext(consumer.getAtomicArray().length()); @@ -93,8 +92,6 @@ public void testCollect() throws InterruptedException { for (int i = numResultsExpected; i < results.length(); i++) { assertNull("index: " + i, results.get(i)); } - } finally { - consumer.decRef(); } } } diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index b14d24cf95f62..1736449f7cbdf 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -125,16 +125,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults 
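// --- Editorial sketch, not part of the patch -----------------------------------
// The test hunks above swap try/finally + decRef() for try-with-resources, which
// works because the result consumers are AutoCloseable and close() maps onto a
// decRef(). The mechanical shape of the migration, with a stand-in type:
final class PhaseResults implements AutoCloseable {
    private int refs = 1;

    void use() { assert refs > 0 : "use after release"; }

    @Override
    public void close() { refs--; }     // plays the role of decRef()

    static void before() {
        PhaseResults results = new PhaseResults();
        try {
            results.use();
        } finally {
            results.close();             // easy to miss on new early returns
        }
    }

    static void after() {
        try (PhaseResults results = new PhaseResults()) {
            results.use();               // release is now tied to scope exit
        }
    }
}
// --------------------------------------------------------------------------------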
consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -155,8 +156,6 @@ public void run() throws IOException { assertEquals(84, responseRef.get().get(1).queryResult().topDocs().topDocs.scoreDocs[0].doc); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); assertEquals(2, mockSearchPhaseContext.numSuccess.get()); - } finally { - consumer.decRef(); } } @@ -211,16 +210,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -243,8 +243,6 @@ public void run() throws IOException { assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size()); assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(new ShardSearchContextId("", 2L))); assertNull(responseRef.get().get(1)); - } finally { - consumer.decRef(); } } @@ -299,16 +297,17 @@ public void sendExecuteQuery( SearchPhaseController searchPhaseController = searchPhaseController(); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); mockSearchPhaseContext.searchTransport = searchTransportService; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.searchRequest, - results.length(), - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.searchRequest, + results.length(), + exc -> {} + ) + ) { DfsQueryPhase phase = new DfsQueryPhase(results.asList(), null, null, consumer, (response) -> new SearchPhase("test") { @Override public void run() throws IOException { @@ -320,8 +319,6 @@ public void run() throws IOException { assertThat(mockSearchPhaseContext.failures, hasSize(1)); assertThat(mockSearchPhaseContext.failures.get(0).getCause(), instanceOf(UncheckedIOException.class)); assertThat(mockSearchPhaseContext.releasedSearchContexts, hasSize(1)); // phase execution will clean up on the contexts - } finally { - consumer.decRef(); } } diff --git 
a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 648cb8aa60158..0a98b12444f9c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -42,8 +42,8 @@ public void testCollapseSingleHit() throws IOException { final int numInnerHits = randomIntBetween(1, 5); List collapsedHits = new ArrayList<>(numInnerHits); for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(innerHitNum, "ID"), new SearchHit(innerHitNum + 1, "ID") }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(innerHitNum, "ID"), SearchHit.unpooled(innerHitNum + 1, "ID") }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -98,6 +98,8 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL sections.decRef(); } mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); + // transferring ownership to the multi-search response so no need to release here + mockSearchPhaseContext.searchResponse.set(null); } ActionListener.respondAndRelease( @@ -110,37 +112,43 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit = new SearchHit(1, "ID"); hit.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); + }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); - assertNotNull(theResponse); - assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + SearchResponse theResponse = mockSearchPhaseContext.searchResponse.get(); + assertNotNull(theResponse); + assertEquals(numInnerHits, theResponse.getHits().getHits()[0].getInnerHits().size()); - for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - assertSame( - theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), - collapsedHits.get(innerHitNum) - ); - } + for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { + assertSame( + theResponse.getHits().getHits()[0].getInnerHits().get("innerHit" + innerHitNum), + collapsedHits.get(innerHitNum) + ); + } - assertTrue(executedMultiSearch.get()); + assertTrue(executedMultiSearch.get()); + } finally { + hits.decRef(); + } } finally { + mockSearchPhaseContext.execute(() -> {}); var resp = 
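// --- Editorial sketch, not part of the patch -----------------------------------
// The "transferring ownership" hunk above nulls out searchResponse after wrapping
// it in a MultiSearchResponse.Item: once a new holder owns the reference, the old
// slot must forget it or the test's finally block would release it twice. The
// handoff in isolation (Resp is a stand-in type):
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

final class OwnershipTransfer {
    static final class Resp {
        boolean released;
        void decRef() { assert released == false : "double release"; released = true; }
    }

    static void transfer(AtomicReference<Resp> slot, List<Resp> newOwner) {
        Resp r = slot.get();
        newOwner.add(r);   // the list owns the reference from here on...
        slot.set(null);    // ...so the slot must stop pointing at it
    }

    public static void main(String[] args) {
        AtomicReference<Resp> slot = new AtomicReference<>(new Resp());
        List<Resp> owner = new ArrayList<>();
        transfer(slot, owner);
        if (slot.get() != null) slot.get().decRef();   // finally-style cleanup: no-op now
        owner.forEach(Resp::decRef);                   // the real release happens here
    }
}
// --------------------------------------------------------------------------------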
mockSearchPhaseContext.searchResponse.get(); if (resp != null) { resp.decRef(); } + } } } @@ -198,22 +206,28 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); - phase.run(); - assertThat(mockSearchPhaseContext.phaseFailure.get(), Matchers.instanceOf(RuntimeException.class)); - assertEquals("boom", mockSearchPhaseContext.phaseFailure.get().getMessage()); - assertNotNull(mockSearchPhaseContext.phaseFailure.get()); - assertNull(mockSearchPhaseContext.searchResponse.get()); + }); + phase.run(); + assertThat(mockSearchPhaseContext.phaseFailure.get(), Matchers.instanceOf(RuntimeException.class)); + assertEquals("boom", mockSearchPhaseContext.phaseFailure.get().getMessage()); + assertNotNull(mockSearchPhaseContext.phaseFailure.get()); + assertNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + mockSearchPhaseContext.execute(() -> {}); + hits.decRef(); + collapsedHits.decRef(); + } } public void testSkipPhase() throws IOException { @@ -231,21 +245,26 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { - @Override - public void run() { - var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); - try { - mockSearchPhaseContext.sendSearchResponse(sections, null); - } finally { - sections.decRef(); + try { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { + @Override + public void run() { + var sections = new SearchResponseSections(hits, null, null, false, null, null, 1); + try { + mockSearchPhaseContext.sendSearchResponse(sections, null); + } finally { + sections.decRef(); + } } - } - }); - phase.run(); - mockSearchPhaseContext.assertNoFailure(); - assertNotNull(mockSearchPhaseContext.searchResponse.get()); + }); + phase.run(); + mockSearchPhaseContext.assertNoFailure(); + assertNotNull(mockSearchPhaseContext.searchResponse.get()); + } finally { + hits.decRef(); + } } finally { + mockSearchPhaseContext.execute(() -> {}); var resp = mockSearchPhaseContext.searchResponse.get(); if (resp != null) { resp.decRef(); @@ -328,6 +347,7 @@ public void run() { phase.run(); mockSearchPhaseContext.assertNoFailure(); 
assertNotNull(mockSearchPhaseContext.searchResponse.get()); + mockSearchPhaseContext.execute(() -> {}); } finally { var resp = mockSearchPhaseContext.searchResponse.get(); if (resp != null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java index 035d01108d655..95a4efcca5fa2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java @@ -52,9 +52,11 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL phase.run(); } finally { sections.decRef(); + hits.decRef(); } searchPhaseContext.assertNoFailure(); assertNotNull(searchPhaseContext.searchResponse.get()); + searchPhaseContext.execute(() -> {}); } finally { var resp = searchPhaseContext.searchResponse.get(); if (resp != null) { @@ -126,6 +128,7 @@ void sendExecuteMultiSearch( ), null ); + searchHits.decRef(); } ActionListener.respondAndRelease(listener, new MultiSearchResponse(responses, randomNonNegativeLong())); } @@ -192,6 +195,7 @@ void sendExecuteMultiSearch( phase.run(); } finally { sections.decRef(); + searchHits.decRef(); } assertTrue(requestSent.get()); searchPhaseContext.assertNoFailure(); @@ -220,6 +224,7 @@ void sendExecuteMultiSearch( leftHit1.field("lookup_field_3").getValues(), contains(Map.of("field_a", List.of("a2"), "field_b", List.of("b1", "b2"))) ); + searchPhaseContext.execute(() -> {}); } finally { var resp = searchPhaseContext.searchResponse.get(); if (resp != null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index 4594810da575a..a2c5bed51f5e7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -50,16 +50,17 @@ public class FetchSearchPhaseTests extends ESTestCase { public void testShortcutQueryAndFetchOptimization() { SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 1, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 1, + exc -> {} + ) + ) { boolean hasHits = randomBoolean(); boolean profiled = hasHits && randomBoolean(); final int numHits; @@ -78,8 +79,8 @@ public void testShortcutQueryAndFetchOptimization() { FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(queryResult.getSearchShardTarget()); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -126,7 +127,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); 
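// --- Editorial sketch, not part of the patch -----------------------------------
// Several hunks above add hits.decRef() right after the hits are handed to a
// response: constructing a pooled object gives the creator one reference, and any
// consumer that keeps the object takes its own, so the creator must still drop the
// initial one. The invariant in miniature:
final class CreatorOwns {
    static final class PooledHits {
        int refs = 1;                  // creation implies one reference
        void incRef() { refs++; }
        void decRef() { assert refs > 0 : "double release"; refs--; }
    }

    static final class Response {
        final PooledHits hits;
        Response(PooledHits hits) {
            hits.incRef();             // the response takes its own reference
            this.hits = hits;
        }
        void decRef() { hits.decRef(); }
    }

    public static void main(String[] args) {
        PooledHits hits = new PooledHits();
        Response resp = new Response(hits);   // refs == 2
        hits.decRef();                        // creator drops its reference: refs == 1
        resp.decRef();                        // last owner releases: refs == 0
        assert hits.refs == 0;
    }
}
// --------------------------------------------------------------------------------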
} } @@ -144,16 +144,17 @@ private void assertProfiles(boolean profiled, int totalShards, SearchResponse se public void testFetchTwoDocument() { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -209,16 +210,16 @@ public void sendExecuteFetch( SearchHits hits; if (request.contextId().equals(ctx2)) { fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); } else { assertEquals(ctx1, request.contextId()); fetchResult.setSearchShardTarget(shard1Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -258,23 +259,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testFailFetchOneDoc() { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -327,8 +328,8 @@ public void sendExecuteFetch( FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); @@ -386,7 +387,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } @@ -397,16 +397,17 @@ public void testFetchDocsConcurrently() throws InterruptedException { boolean profiled = randomBoolean(); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(numHits); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new 
NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - numHits, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + numHits, + exc -> {} + ) + ) { SearchShardTarget[] shardTargets = new SearchShardTarget[numHits]; for (int i = 0; i < numHits; i++) { shardTargets[i] = new SearchShardTarget("node1", new ShardId("test", "na", i), null); @@ -439,8 +440,8 @@ public void sendExecuteFetch( FetchSearchResult fetchResult = new FetchSearchResult(); try { fetchResult.setSearchShardTarget(shardTargets[(int) request.contextId().getId()]); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit((int) (request.contextId().getId() + 1)) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled((int) (request.contextId().getId() + 1)) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 100F ); @@ -505,23 +506,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testExceptionFailsPhase() { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = randomIntBetween(2, 10); boolean profiled = randomBoolean(); @@ -578,16 +579,16 @@ public void sendExecuteFetch( SearchHits hits; if (request.contextId().getId() == 321) { fetchResult.setSearchShardTarget(shard2Target); - hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); } else { fetchResult.setSearchShardTarget(shard1Target); assertEquals(request, 123); - hits = new SearchHits( - new SearchHit[] { new SearchHit(42) }, + hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(42) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F ); @@ -620,23 +621,23 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } public void testCleanupIrrelevantContexts() { // contexts that are not fetched should be cleaned up MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2); SearchPhaseController controller = new SearchPhaseController((t, s) -> InternalAggregationTestCase.emptyReduceContextBuilder()); - SearchPhaseResults results = controller.newSearchPhaseResults( - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - mockSearchPhaseContext.getRequest(), - 2, - exc -> {} - ); - try { + try ( + SearchPhaseResults results = controller.newSearchPhaseResults( + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new 
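// --- Editorial sketch, not part of the patch -----------------------------------
// The fetch-phase tests above switch fixtures to SearchHit.unpooled(...) and
// SearchHits.unpooled(...): test data is never recycled, so pinning it as unpooled
// removes every decRef obligation from the test body, while the
// `assert hasReferences()` guards added earlier in the patch still catch pooled
// use-after-release. A stand-alone demonstration (Guarded is hypothetical):
final class Guarded {
    private Integer refs;              // null means unpooled: always reachable

    private Guarded(Integer refs) { this.refs = refs; }

    static Guarded pooled()   { return new Guarded(1); }
    static Guarded unpooled() { return new Guarded(null); }

    boolean hasReferences() { return refs == null || refs > 0; }

    void decRef() { if (refs != null) refs--; }

    int read() {
        assert hasReferences() : "use after release";   // the guard the patch adds
        return 42;
    }

    public static void main(String[] args) {
        Guarded fixture = Guarded.unpooled();
        fixture.decRef();
        fixture.read();                // fine: unpooled data never goes away
        Guarded pooled = Guarded.pooled();
        pooled.decRef();
        pooled.read();                 // trips the assertion when run with -ea
    }
}
// --------------------------------------------------------------------------------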
NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + mockSearchPhaseContext.getRequest(), + 2, + exc -> {} + ) + ) { int resultSetSize = 1; boolean profiled = randomBoolean(); @@ -689,8 +690,8 @@ public void sendExecuteFetch( try { if (request.contextId().getId() == 321) { fetchResult.setSearchShardTarget(shard1Target); - SearchHits hits = new SearchHits( - new SearchHit[] { new SearchHit(84) }, + SearchHits hits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(84) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 2.0F ); @@ -740,7 +741,6 @@ public void run() { if (resp != null) { resp.decRef(); } - results.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 1a510058e3bbd..ed807091ae9a2 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -98,6 +98,8 @@ public void sendSearchResponse(SearchResponseSections internalSearchResponse, At searchContextId ) ); + Releasables.close(releasables); + releasables.clear(); if (existing != null) { existing.decRef(); } @@ -147,12 +149,7 @@ public void addReleasable(Releasable releasable) { @Override public void execute(Runnable command) { - try { - command.run(); - } finally { - Releasables.close(releasables); - releasables.clear(); - } + command.run(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java index 6035950ca4635..db32213ff97b7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java @@ -103,20 +103,21 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { SearchRequest searchRequest = new SearchRequest("index"); searchRequest.setBatchedReduceSize(2); AtomicReference onPartialMergeFailure = new AtomicReference<>(); - QueryPhaseResultConsumer queryPhaseResultConsumer = new QueryPhaseResultConsumer( - searchRequest, - executor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - searchPhaseController, - () -> false, - searchProgressListener, - 10, - e -> onPartialMergeFailure.accumulateAndGet(e, (prev, curr) -> { - curr.addSuppressed(prev); - return curr; - }) - ); - try { + try ( + QueryPhaseResultConsumer queryPhaseResultConsumer = new QueryPhaseResultConsumer( + searchRequest, + executor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + searchPhaseController, + () -> false, + searchProgressListener, + 10, + e -> onPartialMergeFailure.accumulateAndGet(e, (prev, curr) -> { + curr.addSuppressed(prev); + return curr; + }) + ) + ) { CountDownLatch partialReduceLatch = new CountDownLatch(10); @@ -137,8 +138,6 @@ public void testProgressListenerExceptionsAreCaught() throws Exception { queryPhaseResultConsumer.reduce(); assertEquals(1, searchProgressListener.onFinalReduce.get()); - } finally { - queryPhaseResultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java index 30e634314e0ba..cb41a03216dc5 100644 --- 
a/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java @@ -199,8 +199,7 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); CountDownLatch awaitInitialRequests = new CountDownLatch(1); AtomicInteger numRequests = new AtomicInteger(0); - var results = new ArraySearchPhaseResults(shardsIter.size()); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -271,8 +270,6 @@ public void run() { latch.await(); assertTrue(searchPhaseDidRun.get()); assertEquals(numShards, numRequests.get()); - } finally { - results.decRef(); } } @@ -314,9 +311,8 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors())); final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean latchTriggered = new AtomicBoolean(); - var results = new ArraySearchPhaseResults(shardsIter.size()); final TestSearchResponse testResponse = new TestSearchResponse(); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -395,7 +391,6 @@ public void run() { assertThat(runnables, equalTo(Collections.emptyList())); } finally { testResponse.decRef(); - results.decRef(); } } @@ -550,8 +545,7 @@ public void testAllowPartialResults() throws InterruptedException { Map aliasFilters = Collections.singletonMap("_na_", AliasFilter.EMPTY); AtomicInteger numRequests = new AtomicInteger(0); AtomicInteger numFailReplicas = new AtomicInteger(0); - var results = new ArraySearchPhaseResults(shardsIter.size()); - try { + try (var results = new ArraySearchPhaseResults(shardsIter.size())) { AbstractSearchAsyncAction asyncAction = new AbstractSearchAsyncAction<>( "test", logger, @@ -620,8 +614,6 @@ public void run() { assertTrue(searchPhaseDidRun.get()); assertEquals(numShards, numRequests.get()); assertThat(numFailReplicas.get(), greaterThanOrEqualTo(1)); - } finally { - results.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index ac88f999adef6..1f81ad2a02e8c 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -558,7 +558,7 @@ private static AtomicArray generateFetchResults( List searchHits = new ArrayList<>(); for (ScoreDoc scoreDoc : mergedSearchDocs) { if (scoreDoc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(scoreDoc.doc, "")); + searchHits.add(SearchHit.unpooled(scoreDoc.doc, "")); if (scoreDoc.score > maxScore) { maxScore = scoreDoc.score; } @@ -570,7 +570,7 @@ private static AtomicArray generateFetchResults( for (CompletionSuggestion.Entry.Option option : ((CompletionSuggestion) suggestion).getOptions()) { ScoreDoc doc = option.getDoc(); if (doc.shardIndex == shardIndex) { - searchHits.add(new SearchHit(doc.doc, "")); + searchHits.add(SearchHit.unpooled(doc.doc, "")); if (doc.score > maxScore) { maxScore = doc.score; } @@ -583,7 +583,10 @@ private static 
AtomicArray generateFetchResults( ProfileResult profileResult = profile && searchHits.size() > 0 ? new ProfileResult("fetch", "fetch", Map.of(), Map.of(), randomNonNegativeLong(), List.of()) : null; - fetchSearchResult.shardResult(new SearchHits(hits, new TotalHits(hits.length, Relation.EQUAL_TO), maxScore), profileResult); + fetchSearchResult.shardResult( + SearchHits.unpooled(hits, new TotalHits(hits.length, Relation.EQUAL_TO), maxScore), + profileResult + ); fetchResults.set(shardIndex, fetchSearchResult); } return fetchResults; @@ -610,16 +613,17 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test"))); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - 3 + numEmptyResponses, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + 3 + numEmptyResponses, + exc -> {} + ) + ) { if (numEmptyResponses == 0) { assertEquals(0, reductions.size()); } @@ -723,8 +727,6 @@ private void consumerTestCase(int numEmptyResponses) throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -735,16 +737,17 @@ public void testConsumerConcurrently() throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test"))); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); Thread[] threads = new Thread[expectedNumResults]; CountDownLatch latch = new CountDownLatch(expectedNumResults); @@ -797,8 +800,6 @@ public void testConsumerConcurrently() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -808,16 +809,17 @@ public void testConsumerOnlyAggs() throws Exception { SearchRequest request = randomSearchRequest(); request.source(new SearchSourceBuilder().aggregation(new MaxAggregationBuilder("test")).size(0)); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + 
SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(expectedNumResults); for (int i = 0; i < expectedNumResults; i++) { @@ -857,8 +859,6 @@ public void testConsumerOnlyAggs() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -870,16 +870,18 @@ public void testConsumerOnlyHits() throws Exception { request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10))); } request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); CountDownLatch latch = new CountDownLatch(expectedNumResults); for (int i = 0; i < expectedNumResults; i++) { @@ -916,8 +918,6 @@ public void testConsumerOnlyHits() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -937,16 +937,17 @@ public void testReduceTopNWithFromOffset() throws Exception { SearchRequest request = new SearchRequest(); request.source(new SearchSourceBuilder().size(5).from(5)); request.setBatchedReduceSize(randomIntBetween(2, 4)); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - 4, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + 4, + exc -> {} + ) + ) { int score = 100; CountDownLatch latch = new CountDownLatch(4); for (int i = 0; i < 4; i++) { @@ -984,8 +985,6 @@ public void testReduceTopNWithFromOffset() throws Exception { assertEquals(93.0f, scoreDocs[2].score, 0.0f); assertEquals(92.0f, scoreDocs[3].score, 0.0f); assertEquals(91.0f, scoreDocs[4].score, 0.0f); - } finally { - consumer.decRef(); } } @@ -995,16 +994,17 @@ public void testConsumerSortByField() throws Exception { SearchRequest request = randomSearchRequest(); int size = randomIntBetween(1, 10); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); SortField[] sortFields = { new SortField("field", SortField.Type.INT, true) }; DocValueFormat[] docValueFormats = { 
DocValueFormat.RAW }; @@ -1040,8 +1040,6 @@ public void testConsumerSortByField() throws Exception { assertEquals(SortField.Type.INT, reduce.sortedTopDocs().sortFields()[0].getType()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1051,16 +1049,17 @@ public void testConsumerFieldCollapsing() throws Exception { SearchRequest request = randomSearchRequest(); int size = randomIntBetween(5, 10); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { SortField[] sortFields = { new SortField("field", SortField.Type.STRING) }; BytesRef a = new BytesRef("a"); BytesRef b = new BytesRef("b"); @@ -1100,8 +1099,6 @@ public void testConsumerFieldCollapsing() throws Exception { assertEquals(SortField.Type.STRING, reduce.sortedTopDocs().sortFields()[0].getType()); assertEquals("field", reduce.sortedTopDocs().collapseField()); assertArrayEquals(collapseValues, reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1110,16 +1107,17 @@ public void testConsumerSuggestions() throws Exception { int bufferSize = randomIntBetween(2, 200); SearchRequest request = randomSearchRequest(); request.setBatchedReduceSize(bufferSize); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> {} + ) + ) { int maxScoreTerm = -1; int maxScorePhrase = -1; int maxScoreCompletion = -1; @@ -1216,8 +1214,6 @@ public void testConsumerSuggestions() throws Exception { assertNull(reduce.sortedTopDocs().sortFields()); assertNull(reduce.sortedTopDocs().collapseField()); assertNull(reduce.sortedTopDocs().collapseValues()); - } finally { - consumer.decRef(); } } @@ -1257,16 +1253,17 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna assertEquals(numReduceListener.incrementAndGet(), reducePhase); } }; - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - progressListener, - request, - expectedNumResults, - exc -> {} - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + progressListener, + request, + expectedNumResults, + exc -> {} + ) + ) { AtomicInteger max = new AtomicInteger(); Thread[] threads = new Thread[expectedNumResults]; CountDownLatch latch = new CountDownLatch(expectedNumResults); @@ -1324,8 +1321,6 @@ public void onFinalReduce(List shards, TotalHits totalHits, Interna assertEquals(expectedNumResults, numQueryResultListener.get()); 
assertEquals(0, numQueryFailureListener.get()); assertEquals(numReduceListener.get(), reduce.numReducePhases()); - } finally { - consumer.decRef(); } } } @@ -1348,16 +1343,17 @@ private void testReduceCase(int numShards, int bufferSize, boolean shouldFail) t if (shouldFailPartial) { circuitBreaker.shouldBreak.set(true); } - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - circuitBreaker, - () -> false, - SearchProgressListener.NOOP, - request, - numShards, - exc -> hasConsumedFailure.set(true) - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + circuitBreaker, + () -> false, + SearchProgressListener.NOOP, + request, + numShards, + exc -> hasConsumedFailure.set(true) + ) + ) { CountDownLatch latch = new CountDownLatch(numShards); Thread[] threads = new Thread[numShards]; for (int i = 0; i < numShards; i++) { @@ -1406,8 +1402,6 @@ private void testReduceCase(int numShards, int bufferSize, boolean shouldFail) t } else { consumer.reduce(); } - } finally { - consumer.decRef(); } assertThat(circuitBreaker.allocated, equalTo(0L)); } @@ -1420,16 +1414,17 @@ public void testFailConsumeAggs() throws Exception { request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0)); request.setBatchedReduceSize(bufferSize); AtomicBoolean hasConsumedFailure = new AtomicBoolean(); - SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( - fixedExecutor, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - () -> false, - SearchProgressListener.NOOP, - request, - expectedNumResults, - exc -> hasConsumedFailure.set(true) - ); - try { + try ( + SearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults( + fixedExecutor, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + () -> false, + SearchProgressListener.NOOP, + request, + expectedNumResults, + exc -> hasConsumedFailure.set(true) + ) + ) { for (int i = 0; i < expectedNumResults; i++) { final int index = i; QuerySearchResult result = new QuerySearchResult( @@ -1454,8 +1449,6 @@ public void testFailConsumeAggs() throws Exception { } } assertNull(consumer.reduce().aggregations()); - } finally { - consumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java index 760070979077d..aef472928923b 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -187,17 +187,18 @@ public void sendExecuteQuery( searchRequest.allowPartialSearchResults(false); SearchPhaseController controller = new SearchPhaseController((t, r) -> InternalAggregationTestCase.emptyReduceContextBuilder()); SearchTask task = new SearchTask(0, "n/a", "n/a", () -> "test", null, Collections.emptyMap()); - QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( - searchRequest, - EsExecutors.DIRECT_EXECUTOR_SERVICE, - new NoopCircuitBreaker(CircuitBreaker.REQUEST), - controller, - task::isCancelled, - task.getProgressListener(), - shardsIter.size(), - exc -> {} - ); - try { + try ( + QueryPhaseResultConsumer resultConsumer = new QueryPhaseResultConsumer( + searchRequest, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + new NoopCircuitBreaker(CircuitBreaker.REQUEST), + controller, + task::isCancelled, + 
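// --- Editorial sketch, not part of the patch -----------------------------------
// testReduceCase above closes the consumer and then asserts the breaker's
// allocated bytes are back at zero: the accounting circuit breaker doubles as a
// leak detector, because every buffered partial result must return its bytes on
// release. Minimal version:
import java.util.concurrent.atomic.AtomicLong;

final class AccountingBreaker {
    final AtomicLong allocated = new AtomicLong();

    void add(long bytes)     { allocated.addAndGet(bytes); }
    void release(long bytes) { allocated.addAndGet(-bytes); }

    public static void main(String[] args) {
        AccountingBreaker breaker = new AccountingBreaker();
        breaker.add(1024);        // a consumer buffers a partial reduce result
        breaker.release(1024);    // consumer.close() must hand it all back
        assert breaker.allocated.get() == 0L : "leaked " + breaker.allocated.get() + " bytes";
    }
}
// --------------------------------------------------------------------------------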
task.getProgressListener(), + shardsIter.size(), + exc -> {} + ) + ) { SearchQueryThenFetchAsyncAction action = new SearchQueryThenFetchAsyncAction( logger, null, @@ -252,8 +253,6 @@ public void run() { assertThat(phase.sortedTopDocs().scoreDocs()[0], instanceOf(FieldDoc.class)); assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields.length, equalTo(1)); assertThat(((FieldDoc) phase.sortedTopDocs().scoreDocs()[0]).fields[0], equalTo(0)); - } finally { - resultConsumer.decRef(); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index 0d34634df5ec4..0070d61a2adcb 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -395,7 +395,7 @@ public void testMergeCompletionSuggestions() throws InterruptedException { i, Collections.emptyMap() ); - SearchHit hit = new SearchHit(docId); + SearchHit hit = SearchHit.unpooled(docId); ShardId shardId = new ShardId( randomAlphaOfLengthBetween(5, 10), randomAlphaOfLength(10), @@ -481,7 +481,7 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException 1F, Collections.emptyMap() ); - SearchHit searchHit = new SearchHit(docId); + SearchHit searchHit = SearchHit.unpooled(docId); searchHit.shard( new SearchShardTarget( "node", @@ -826,6 +826,7 @@ public void testMergeSearchHits() throws InterruptedException { try { addResponse(searchResponseMerger, searchResponse); } finally { + searchHits.decRef(); searchResponse.decRef(); } } @@ -972,6 +973,7 @@ public void testMergeEmptySearchHitsWithNonEmpty() { try { merger.add(searchResponse); } finally { + searchHits.decRef(); searchResponse.decRef(); } } @@ -1158,7 +1160,7 @@ public void testPartialAggsMixedWithFullResponses() { int successful = 2; int skipped = 1; Index[] indices = new Index[] { new Index("foo_idx", "1bba9f5b-c5a1-4664-be1b-26be590c1aff") }; - SearchResponse searchResponseRemote1 = new SearchResponse( + final SearchResponse searchResponseRemote1 = new SearchResponse( createSimpleDeterministicSearchHits(clusterAlias, indices), createDeterminsticAggregation(maxAggName, rangeAggName, value, count), null, @@ -1199,268 +1201,282 @@ public void testPartialAggsMixedWithFullResponses() { ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY ); - - SearchResponse.Clusters clusters = SearchResponseTests.createCCSClusterObject(3, 2, true, 2, 1, 0, 0, new ShardSearchFailure[0]); - - // merge partial aggs with remote1, check, then merge in remote2, check - try ( - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + try { + SearchResponse.Clusters clusters = SearchResponseTests.createCCSClusterObject( + 3, + 2, + true, + 2, + 1, 0, - 10, - 10, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder( - new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) - .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + 0, + new ShardSearchFailure[0] + ); + + // merge partial aggs with remote1, check, then merge in remote2, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) 
+ ) ) - ) - ) { - searchResponseMerger.add(searchResponsePartialAggs); - searchResponseMerger.add(searchResponseRemote1); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); - try { - SearchHits hits = mergedResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(2L)); // should be 2 hits from remote1 - SearchHit hit1 = hits.getHits()[0]; - String expectedHit1 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit1.toString(), expectedHit1); - - SearchHit hit2 = hits.getHits()[1]; - String expectedHit2 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit2.toString(), expectedHit2); - - double expectedMaxValue = 44.44; // value from remote1 - long expectedBucketsDocCount = 33 + 44; - Max max = mergedResponse.getAggregations().get(maxAggName); - assertEquals(expectedMaxValue, max.value(), 0d); - Range range = mergedResponse.getAggregations().get(rangeAggName); - assertEquals(1, range.getBuckets().size()); - Range.Bucket bucket = range.getBuckets().get(0); - assertEquals("0.0", bucket.getFromAsString()); - assertEquals("10000.0", bucket.getToAsString()); - assertEquals(expectedBucketsDocCount, bucket.getDocCount()); - } finally { - mergedResponse.decRef(); - } + ) { + searchResponseMerger.add(searchResponsePartialAggs); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(2L)); // should be 2 hits from remote1 + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + double expectedMaxValue = 44.44; // value from remote1 + long expectedBucketsDocCount = 33 + 44; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } - searchResponseMerger.add(searchResponseRemote2); - mergedResponse = searchResponseMerger.getMergedResponse(clusters); - try { - SearchHits hits = mergedResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 - - SearchHit hit1 = hits.getHits()[0]; - String expectedHit1 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit1.toString(), expectedHit1); - - SearchHit hit2 = hits.getHits()[1]; - String expectedHit2 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit2.toString(), expectedHit2); - - SearchHit hit3 = hits.getHits()[2]; - String expectedHit3 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit3.toString(), expectedHit3); - - SearchHit hit4 = 
hits.getHits()[3]; - String expectedHit4 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit4.toString(), expectedHit4); - - double expectedMaxValue = 55.55; // value from remote2 - long expectedBucketsDocCount = 33 + 44 + 55; - Max max = mergedResponse.getAggregations().get(maxAggName); - assertEquals(expectedMaxValue, max.value(), 0d); - Range range = mergedResponse.getAggregations().get(rangeAggName); - assertEquals(1, range.getBuckets().size()); - Range.Bucket bucket = range.getBuckets().get(0); - assertEquals("0.0", bucket.getFromAsString()); - assertEquals("10000.0", bucket.getToAsString()); - assertEquals(expectedBucketsDocCount, bucket.getDocCount()); - } finally { - mergedResponse.decRef(); + searchResponseMerger.add(searchResponseRemote2); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } } - } - // merge remote1 and remote2, no partial aggs, check, then merge in partial aggs from local, check - try ( - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( - 0, - 10, - 10, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder( - new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) - .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + // merge remote1 and remote2, no partial aggs, check, then merge in partial aggs from local, check + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + 0, + 10, + 10, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) ) - ) - ) { - searchResponseMerger.add(searchResponseRemote2); - searchResponseMerger.add(searchResponseRemote1); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); - try { - SearchHits hits = mergedResponse.getHits(); - SearchHit 
hit1 = hits.getHits()[0]; - String expectedHit1 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit1.toString(), expectedHit1); - - SearchHit hit2 = hits.getHits()[1]; - String expectedHit2 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit2.toString(), expectedHit2); - - SearchHit hit3 = hits.getHits()[2]; - String expectedHit3 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit3.toString(), expectedHit3); - - SearchHit hit4 = hits.getHits()[3]; - String expectedHit4 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit4.toString(), expectedHit4); - - double expectedMaxValue = 55.55; // value from remote2 - long expectedBucketsDocCount = 44 + 55; // missing 33 from local partial aggs - Max max = mergedResponse.getAggregations().get(maxAggName); - assertEquals(expectedMaxValue, max.value(), 0d); - Range range = mergedResponse.getAggregations().get(rangeAggName); - assertEquals(1, range.getBuckets().size()); - Range.Bucket bucket = range.getBuckets().get(0); - assertEquals("0.0", bucket.getFromAsString()); - assertEquals("10000.0", bucket.getToAsString()); - assertEquals(expectedBucketsDocCount, bucket.getDocCount()); - } finally { - mergedResponse.decRef(); - } + ) { + searchResponseMerger.add(searchResponseRemote2); + searchResponseMerger.add(searchResponseRemote1); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 44 + 55; // missing 33 from local partial aggs + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } - searchResponseMerger.add(searchResponsePartialAggs); - mergedResponse = searchResponseMerger.getMergedResponse(clusters); - try { - SearchHits hits = mergedResponse.getHits(); - assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 - - SearchHit hit1 = hits.getHits()[0]; - String expectedHit1 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit1.toString(), 
expectedHit1); - - SearchHit hit2 = hits.getHits()[1]; - String expectedHit2 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 2.0, - "sort" : [ - 2.0 - ] - }"""; - assertEquals(hit2.toString(), expectedHit2); - - SearchHit hit3 = hits.getHits()[2]; - String expectedHit3 = """ - { - "_index" : "remote1:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit3.toString(), expectedHit3); - - SearchHit hit4 = hits.getHits()[3]; - String expectedHit4 = """ - { - "_index" : "remote2:foo_idx", - "_score" : 1.0, - "sort" : [ - 1.0 - ] - }"""; - assertEquals(hit4.toString(), expectedHit4); - - double expectedMaxValue = 55.55; // value from remote2 - long expectedBucketsDocCount = 33 + 44 + 55; // contributions from all 3 search responses - Max max = mergedResponse.getAggregations().get(maxAggName); - assertEquals(expectedMaxValue, max.value(), 0d); - Range range = mergedResponse.getAggregations().get(rangeAggName); - assertEquals(1, range.getBuckets().size()); - Range.Bucket bucket = range.getBuckets().get(0); - assertEquals("0.0", bucket.getFromAsString()); - assertEquals("10000.0", bucket.getToAsString()); - assertEquals(expectedBucketsDocCount, bucket.getDocCount()); - } finally { - mergedResponse.decRef(); + searchResponseMerger.add(searchResponsePartialAggs); + mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + SearchHits hits = mergedResponse.getHits(); + assertThat(hits.getTotalHits().value, equalTo(4L)); // should be 2 hits from remote1, 2 from remote2 + + SearchHit hit1 = hits.getHits()[0]; + String expectedHit1 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit1.toString(), expectedHit1); + + SearchHit hit2 = hits.getHits()[1]; + String expectedHit2 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 2.0, + "sort" : [ + 2.0 + ] + }"""; + assertEquals(hit2.toString(), expectedHit2); + + SearchHit hit3 = hits.getHits()[2]; + String expectedHit3 = """ + { + "_index" : "remote1:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit3.toString(), expectedHit3); + + SearchHit hit4 = hits.getHits()[3]; + String expectedHit4 = """ + { + "_index" : "remote2:foo_idx", + "_score" : 1.0, + "sort" : [ + 1.0 + ] + }"""; + assertEquals(hit4.toString(), expectedHit4); + + double expectedMaxValue = 55.55; // value from remote2 + long expectedBucketsDocCount = 33 + 44 + 55; // contributions from all 3 search responses + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(expectedMaxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(expectedBucketsDocCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } } + } finally { + searchResponseRemote1.decRef(); + searchResponseRemote2.decRef(); + searchResponsePartialAggs.decRef(); } } @@ -1475,15 +1491,7 @@ private SearchHits createSimpleDeterministicSearchHits(String clusterAlias, Inde PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); SearchHit[] hits = deterministicSearchHitArray(numDocs, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); - SearchHits searchHits = new SearchHits( - hits, - totalHits, - maxScore == Float.NEGATIVE_INFINITY ? 
Float.NaN : maxScore, - sortFields, - null, - null - ); - return searchHits; + return SearchHits.unpooled(hits, totalHits, maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, sortFields, null, null); } private static InternalAggregations createDeterminsticAggregation(String maxAggName, String rangeAggName, double value, int count) { @@ -1523,7 +1531,7 @@ private static SearchHit[] deterministicSearchHitArray( for (int j = 0; j < numDocs; j++) { ShardId shardId = new ShardId(randomFrom(indices), j); SearchShardTarget shardTarget = new SearchShardTarget("abc123", shardId, clusterAlias); - SearchHit hit = new SearchHit(j); + SearchHit hit = SearchHit.unpooled(j); float score = Float.NaN; if (Float.isNaN(maxScore) == false) { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index ef759279e095f..0d85d020c4180 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -115,25 +115,29 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... sha } if (minimal == false) { SearchHits hits = SearchHitsTests.createTestItem(true, true); - InternalAggregations aggregations = aggregationsTests.createTestInstance(); - Suggest suggest = SuggestTests.createTestItem(); - SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); - return new SearchResponse( - hits, - aggregations, - suggest, - timedOut, - terminatedEarly, - profileResults, - numReducePhases, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardSearchFailures, - clusters - ); + try { + InternalAggregations aggregations = aggregationsTests.createTestInstance(); + Suggest suggest = SuggestTests.createTestItem(); + SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); + return new SearchResponse( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters + ); + } finally { + hits.decRef(); + } } else { return SearchResponseUtils.emptyWithTotalHits( null, @@ -381,9 +385,10 @@ public void testToXContent() throws IOException { SearchHit hit = new SearchHit(1, "id1"); hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; + var sHits = new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f); { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -425,7 +430,7 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -475,7 +480,7 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + sHits, null, null, false, @@ -617,6 +622,7 @@ public void testToXContent() throws IOException { response.decRef(); } } + sHits.decRef(); } public void testSerialization() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java index 
e9bf6f83f5bbc..40bdc3da37242 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.Index; @@ -143,7 +144,7 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp @Override protected Writeable.Reader instanceReader() { - return SearchHit::readFrom; + return in -> SearchHit.readFrom(in, randomBoolean()); } @Override @@ -159,16 +160,20 @@ protected SearchHit mutateInstance(SearchHit instance) { public void testFromXContent() throws IOException { XContentType xContentType = randomFrom(XContentType.values()).canonical(); SearchHit searchHit = createTestItem(xContentType, true, false); - boolean humanReadable = randomBoolean(); - BytesReference originalBytes = toShuffledXContent(searchHit, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); - SearchHit parsed; - try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { - parser.nextToken(); // jump to first START_OBJECT - parsed = SearchHit.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); - assertNull(parser.nextToken()); + try { + boolean humanReadable = randomBoolean(); + BytesReference originalBytes = toShuffledXContent(searchHit, xContentType, ToXContent.EMPTY_PARAMS, humanReadable); + SearchHit parsed; + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + parser.nextToken(); // jump to first START_OBJECT + parsed = SearchHit.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); + } finally { + searchHit.decRef(); } - assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, humanReadable), xContentType); } /** @@ -184,22 +189,26 @@ public void testFromXContent() throws IOException { public void testFromXContentLenientParsing() throws IOException { XContentType xContentType = randomFrom(XContentType.values()); SearchHit searchHit = createTestItem(xContentType, true, true); - BytesReference originalBytes = toXContent(searchHit, xContentType, true); - Predicate pathsToExclude = path -> path.endsWith("highlight") - || path.contains("fields") - || path.contains("_source") - || path.contains("inner_hits") - || path.isEmpty(); - BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); - - SearchHit parsed; - try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - parser.nextToken(); // jump to first START_OBJECT - parsed = SearchHit.fromXContent(parser); - assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); - assertNull(parser.nextToken()); + try { + BytesReference originalBytes = toXContent(searchHit, xContentType, true); + Predicate pathsToExclude = path -> path.endsWith("highlight") + || path.contains("fields") + || path.contains("_source") + || path.contains("inner_hits") + || path.isEmpty(); + BytesReference withRandomFields = 
insertRandomFields(xContentType, originalBytes, pathsToExclude, random()); + + SearchHit parsed; + try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { + parser.nextToken(); // jump to first START_OBJECT + parsed = SearchHit.fromXContent(parser); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertNull(parser.nextToken()); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); + } finally { + searchHit.decRef(); } - assertToXContentEquivalent(originalBytes, toXContent(parsed, xContentType, true), xContentType); } /** @@ -221,15 +230,19 @@ public void testFromXContentWithoutTypeAndId() throws IOException { public void testToXContent() throws IOException { SearchHit searchHit = new SearchHit(1, "id1"); - searchHit.score(1.5f); - XContentBuilder builder = JsonXContent.contentBuilder(); - searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); - assertEquals(""" - {"_id":"id1","_score":1.5}""", Strings.toString(builder)); + try { + searchHit.score(1.5f); + XContentBuilder builder = JsonXContent.contentBuilder(); + searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); + assertEquals(""" + {"_id":"id1","_score":1.5}""", Strings.toString(builder)); + } finally { + searchHit.decRef(); + } } public void testRankToXContent() throws IOException { - SearchHit searchHit = new SearchHit(1, "id1"); + SearchHit searchHit = SearchHit.unpooled(1, "id1"); searchHit.setRank(1); XContentBuilder builder = JsonXContent.contentBuilder(); searchHit.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -264,30 +277,42 @@ public void testSerializeShardTarget() throws Exception { hit2.shard(target); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1f); - - TransportVersion version = TransportVersionUtils.randomVersion(random()); - SearchHits results = copyWriteable(hits, getNamedWriteableRegistry(), SearchHits::new, version); - SearchShardTarget deserializedTarget = results.getAt(0).getShard(); - assertThat(deserializedTarget, equalTo(target)); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); - assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); - for (SearchHit hit : results) { - assertEquals(clusterAlias, hit.getClusterAlias()); - if (hit.getInnerHits() != null) { - for (SearchHits innerhits : hit.getInnerHits().values()) { - for (SearchHit innerHit : innerhits) { - assertEquals(clusterAlias, innerHit.getClusterAlias()); + try { + TransportVersion version = TransportVersionUtils.randomVersion(random()); + SearchHits results = copyWriteable( + hits, + getNamedWriteableRegistry(), + (StreamInput in) -> SearchHits.readFrom(in, randomBoolean()), + version + ); + try { + SearchShardTarget deserializedTarget = results.getAt(0).getShard(); + assertThat(deserializedTarget, equalTo(target)); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(0).getInnerHits().get("1").getAt(0).getShard(), notNullValue()); + assertThat(results.getAt(0).getInnerHits().get("1").getAt(1).getShard(), notNullValue()); + 
assertThat(results.getAt(0).getInnerHits().get("2").getAt(0).getShard(), notNullValue()); + for (SearchHit hit : results) { + assertEquals(clusterAlias, hit.getClusterAlias()); + if (hit.getInnerHits() != null) { + for (SearchHits innerhits : hit.getInnerHits().values()) { + for (SearchHit innerHit : innerhits) { + assertEquals(clusterAlias, innerHit.getClusterAlias()); + } + } } } + assertThat(results.getAt(1).getShard(), equalTo(target)); + } finally { + results.decRef(); } + } finally { + hits.decRef(); } - assertThat(results.getAt(1).getShard(), equalTo(target)); } public void testNullSource() { - SearchHit searchHit = new SearchHit(0, "_id"); + SearchHit searchHit = SearchHit.unpooled(0, "_id"); assertThat(searchHit.getSourceAsMap(), nullValue()); assertThat(searchHit.getSourceRef(), nullValue()); @@ -299,7 +324,7 @@ public void testNullSource() { } public void testHasSource() { - SearchHit searchHit = new SearchHit(randomInt()); + SearchHit searchHit = SearchHit.unpooled(randomInt()); assertFalse(searchHit.hasSource()); searchHit.sourceRef(new BytesArray("{}")); assertTrue(searchHit.hasSource()); @@ -376,7 +401,7 @@ public void testToXContentEmptyFields() throws IOException { Map fields = new HashMap<>(); fields.put("foo", new DocumentField("foo", Collections.emptyList())); fields.put("bar", new DocumentField("bar", Collections.emptyList())); - SearchHit hit = new SearchHit(0, "_id"); + SearchHit hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Map.of()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -389,13 +414,17 @@ public void testToXContentEmptyFields() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertNull(parser.nextToken()); } - assertThat(parsed.getFields().size(), equalTo(0)); + try { + assertThat(parsed.getFields().size(), equalTo(0)); + } finally { + parsed.decRef(); + } } fields = new HashMap<>(); fields.put("foo", new DocumentField("foo", Collections.emptyList())); fields.put("bar", new DocumentField("bar", Collections.singletonList("value"))); - hit = new SearchHit(0, "_id"); + hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Collections.emptyMap()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -412,7 +441,7 @@ public void testToXContentEmptyFields() throws IOException { Map metadata = new HashMap<>(); metadata.put("_routing", new DocumentField("_routing", Collections.emptyList())); - hit = new SearchHit(0, "_id"); + hit = SearchHit.unpooled(0, "_id"); hit.addDocumentFields(fields, Collections.emptyMap()); { BytesReference originalBytes = toShuffledXContent(hit, XContentType.JSON, ToXContent.EMPTY_PARAMS, randomBoolean()); @@ -427,7 +456,13 @@ public void testToXContentEmptyFields() throws IOException { assertThat(parsed.getFields().get("bar").getValues(), equalTo(Collections.singletonList("value"))); assertNull(parsed.getFields().get("_routing")); } + } + @Override + protected void dispose(SearchHit searchHit) { + if (searchHit != null) { + searchHit.decRef(); + } } static Explanation createExplanation(int depth) { diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java index 1e720064dab56..4ca3c5b8dd46e 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/SearchHitsTests.java @@ -118,7 +118,7 @@ protected SearchHits mutateInstance(SearchHits instance) { } else { totalHits = null; } - return new SearchHits(instance.getHits(), totalHits, instance.getMaxScore()); + return new SearchHits(instance.asUnpooled().getHits(), totalHits, instance.getMaxScore()); case 2: final float maxScore; if (Float.isNaN(instance.getMaxScore())) { @@ -126,7 +126,7 @@ protected SearchHits mutateInstance(SearchHits instance) { } else { maxScore = Float.NaN; } - return new SearchHits(instance.getHits(), instance.getTotalHits(), maxScore); + return new SearchHits(instance.asUnpooled().getHits(), instance.getTotalHits(), maxScore); case 3: SortField[] sortFields; if (instance.getSortFields() == null) { @@ -135,7 +135,7 @@ protected SearchHits mutateInstance(SearchHits instance) { sortFields = randomBoolean() ? createSortFields(instance.getSortFields().length + 1) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), sortFields, @@ -150,7 +150,7 @@ protected SearchHits mutateInstance(SearchHits instance) { collapseField = randomBoolean() ? instance.getCollapseField() + randomAlphaOfLengthBetween(2, 5) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), instance.getSortFields(), @@ -165,7 +165,7 @@ protected SearchHits mutateInstance(SearchHits instance) { collapseValues = randomBoolean() ? createCollapseValues(instance.getCollapseValues().length + 1) : null; } return new SearchHits( - instance.getHits(), + instance.asUnpooled().getHits(), instance.getTotalHits(), instance.getMaxScore(), instance.getSortFields(), @@ -177,6 +177,11 @@ protected SearchHits mutateInstance(SearchHits instance) { } } + @Override + protected void dispose(SearchHits searchHits) { + searchHits.decRef(); + } + @Override protected Predicate getRandomFieldsExcludeFilter() { return path -> (path.isEmpty() @@ -193,7 +198,7 @@ protected String[] getShuffleFieldsExceptions() { @Override protected Writeable.Reader instanceReader() { - return SearchHits::new; + return in -> SearchHits.readFrom(in, randomBoolean()); } @Override @@ -223,15 +228,19 @@ protected SearchHits doParseInstance(XContentParser parser) throws IOException { SearchHits searchHits = SearchHits.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); - return searchHits; + try { + return searchHits.asUnpooled(); + } finally { + searchHits.decRef(); + } } public void testToXContent() throws IOException { - SearchHit[] hits = new SearchHit[] { new SearchHit(1, "id1"), new SearchHit(2, "id2") }; + SearchHit[] hits = new SearchHit[] { SearchHit.unpooled(1, "id1"), SearchHit.unpooled(2, "id2") }; long totalHits = 1000; float maxScore = 1.5f; - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); ChunkedToXContent.wrapAsToXContent(searchHits).toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -251,7 +260,10 @@ public void testToXContent() throws IOException { public void testFromXContentWithShards() throws IOException { for (boolean withExplanation : new boolean[] { true, false 
}) { - final SearchHit[] hits = new SearchHit[] { new SearchHit(1, "id1"), new SearchHit(2, "id2"), new SearchHit(10, "id10") }; + final SearchHit[] hits = new SearchHit[] { + SearchHit.unpooled(1, "id1"), + SearchHit.unpooled(2, "id2"), + SearchHit.unpooled(10, "id10") }; for (SearchHit hit : hits) { String index = randomAlphaOfLengthBetween(5, 10); @@ -269,7 +281,7 @@ public void testFromXContentWithShards() throws IOException { long totalHits = 1000; float maxScore = 1.5f; - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); XContentType xContentType = randomFrom(XContentType.values()).canonical(); BytesReference bytes = toShuffledXContent( ChunkedToXContent.wrapAsToXContent(searchHits), @@ -304,7 +316,6 @@ public void testFromXContentWithShards() throws IOException { } } } - } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java index 7d3799b2db35d..1052987aabbdd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -159,13 +159,13 @@ private InternalTopHits createTestInstance( Map searchHitFields = new HashMap<>(); scoreDocs[i] = docBuilder.apply(docId, score); - hits[i] = new SearchHit(docId, Integer.toString(i)); + hits[i] = SearchHit.unpooled(docId, Integer.toString(i)); hits[i].addDocumentFields(searchHitFields, Collections.emptyMap()); hits[i].score(score); } int totalHits = between(actualSize, 500000); sort(hits, scoreDocs, comparator); - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), maxScore); TopDocs topDocs = topDocsBuilder.apply(new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), scoreDocs); // Lucene's TopDocs initializes the maxScore to Float.NaN, if there is no maxScore @@ -276,16 +276,20 @@ protected void assertReduced(InternalTopHits reduced, List inpu new TotalHits(totalHits, relation), maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore ); - assertEqualsWithErrorMessageFromXContent(expectedHits, actualHits); + try { + assertEqualsWithErrorMessageFromXContent(expectedHits, actualHits); + } finally { + expectedHits.decRef(); + } } public void testGetProperty() { // Create a SearchHit containing: { "foo": 1000.0 } and use it to initialize an InternalTopHits instance. 
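The substitution just below (new SearchHit(0) becoming SearchHit.unpooled(0)) is the convention applied throughout these test hunks: fixtures that never travel back through a pooled release path are created via the unpooled factories, while genuinely pooled instances must be released exactly once. A condensed sketch of both cases, assuming a bare JUnit-style test body; only calls that appear in the hunks themselves are used:

    import org.apache.lucene.search.TotalHits;
    import org.elasticsearch.search.SearchHit;
    import org.elasticsearch.search.SearchHits;

    public void testPooledVersusUnpooledSketch() {
        // Unpooled fixtures are not ref-counted; the test may hold them freely.
        SearchHit unpooledHit = SearchHit.unpooled(0, "id0");
        SearchHits unpooledHits = SearchHits.unpooled(
            new SearchHit[] { unpooledHit },
            new TotalHits(1, TotalHits.Relation.EQUAL_TO),
            1.0f
        );

        // A pooled instance, by contrast, must be released once the test is done with it.
        SearchHit pooled = new SearchHit(1, "id1");
        try {
            pooled.score(1.5f);
        } finally {
            pooled.decRef();
        }
    }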
- SearchHit hit = new SearchHit(0); + SearchHit hit = SearchHit.unpooled(0); hit = hit.sourceRef(Source.fromMap(Map.of("foo", 1000.0), XContentType.YAML).internalSourceRef()); hit.sortValues(new Object[] { 10.0 }, new DocValueFormat[] { DocValueFormat.RAW }); hit.score(1.0f); - SearchHits hits = new SearchHits(new SearchHit[] { hit }, null, 0); + SearchHits hits = SearchHits.unpooled(new SearchHit[] { hit }, null, 0); InternalTopHits internalTopHits = new InternalTopHits("test", 0, 0, null, hits, null); assertEquals(internalTopHits, internalTopHits.getProperty(Collections.emptyList())); @@ -301,7 +305,7 @@ public void testGetProperty() { expectThrows(IllegalArgumentException.class, () -> internalTopHits.getProperty(List.of("_sort"))); // Two SearchHit instances are not allowed, only the first will be used without assertion. - hits = new SearchHits(new SearchHit[] { hit, hit }, null, 0); + hits = SearchHits.unpooled(new SearchHit[] { hit, hit }, null, 0); InternalTopHits internalTopHits3 = new InternalTopHits("test", 0, 0, null, hits, null); expectThrows(IllegalArgumentException.class, () -> internalTopHits3.getProperty(List.of("foo"))); } @@ -397,7 +401,7 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { int from = instance.getFrom(); int size = instance.getSize(); TopDocsAndMaxScore topDocs = instance.getTopDocs(); - SearchHits searchHits = instance.getHits(); + SearchHits searchHits = instance.getHits().asUnpooled(); Map metadata = instance.getMetadata(); switch (between(0, 5)) { case 0 -> name += randomAlphaOfLength(5); @@ -415,7 +419,7 @@ protected InternalTopHits mutateInstance(InternalTopHits instance) { searchHits.getTotalHits().value + between(1, 100), randomFrom(TotalHits.Relation.values()) ); - searchHits = new SearchHits(searchHits.getHits(), totalHits, searchHits.getMaxScore() + randomFloat()); + searchHits = SearchHits.unpooled(searchHits.getHits(), totalHits, searchHits.getMaxScore() + randomFloat()); } case 5 -> { if (metadata == null) { diff --git a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java index 4c8484be200e5..f8af8a2e3109b 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/FetchPhaseDocsIteratorTests.java @@ -81,6 +81,7 @@ protected SearchHit nextDoc(int doc) { assertThat(hits.length, equalTo(docs.length)); for (int i = 0; i < hits.length; i++) { assertThat(hits[i].docId(), equalTo(docs[i])); + hits[i].decRef(); } reader.close(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java index e0a26fbc67ffd..a5371e7b0b00a 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchFieldsPhaseTests.java @@ -82,7 +82,7 @@ public void testDocValueFetcher() throws IOException { for (LeafReaderContext context : reader.leaves()) { processor.setNextReader(context); for (int doc = 0; doc < context.reader().maxDoc(); doc++) { - SearchHit searchHit = new SearchHit(doc + context.docBase); + SearchHit searchHit = SearchHit.unpooled(doc + context.docBase); processor.process(new FetchSubPhase.HitContext(searchHit, context, doc, Map.of(), Source.empty(null))); 
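Most of the reworked assertions in these hunks share one shape: acquire a ref-counted object, use it inside try, release it in finally so the pool stays balanced even when an assertion throws. A minimal sketch of that discipline, where buildPooledResponse() is a hypothetical stand-in for whatever helper produces the pooled SearchResponse under test:

    SearchResponse response = buildPooledResponse(); // hypothetical helper
    try {
        assertEquals(2L, response.getHits().getTotalHits().value);
    } finally {
        response.decRef(); // released exactly once, even on assertion failure
    }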
assertNotNull(searchHit.getFields().get("field")); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java index 620706a01c88f..3a4d67ae281f2 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FetchSourcePhaseTests.java @@ -170,7 +170,7 @@ private HitContext hitExecuteMultiple( when(sec.isSourceEnabled()).thenReturn(sourceBuilder != null); when(fetchContext.getSearchExecutionContext()).thenReturn(sec); - final SearchHit searchHit = new SearchHit(1, null, nestedIdentity); + final SearchHit searchHit = SearchHit.unpooled(1, null, nestedIdentity); // We don't need a real index, just a LeafReaderContext which cannot be mocked. MemoryIndex index = new MemoryIndex(); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java index 7a1751dbd41fc..be36d72304bd0 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/FieldFetcherTests.java @@ -1169,7 +1169,7 @@ public void testNestedGrouping() throws IOException { """; var results = fetchFields(mapperService, source, fieldAndFormatList("*", null, false)); - SearchHit searchHit = new SearchHit(0); + SearchHit searchHit = SearchHit.unpooled(0); searchHit.addDocumentFields(results, Map.of()); assertThat(Strings.toString(searchHit), containsString("\"ml.top_classes\":")); } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java index 8a82ae8ce7268..42fe65c8d14ef 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestionOptionTests.java @@ -54,6 +54,9 @@ public static Option createTestItem() { } Option option = new CompletionSuggestion.Entry.Option(docId, text, score, contexts); option.setHit(hit); + if (hit != null) { + hit.decRef(); + } return option; } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 947b894124137..d4ea90ee6412a 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -147,8 +147,8 @@ public static MockTransportService startTransport( } SearchHits searchHits; if ("null_target".equals(request.preference())) { - searchHits = new SearchHits( - new SearchHit[] { new SearchHit(0) }, + searchHits = SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(0) }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1F ); diff --git a/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java index a9f1ab7780f7f..526c2104e52ae 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/fetch/HighlighterTestCase.java @@ -73,7 +73,7 @@ 
protected final Map highlight(MapperService mapperServic Map> storedFields = storedFields(processor.storedFieldsSpec(), doc); Source source = Source.fromBytes(doc.source()); FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext( - new SearchHit(0, "id"), + SearchHit.unpooled(0, "id"), ir.leaves().get(0), 0, storedFields, diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 770c56f9c5952..4df1e745f3bf4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -32,7 +32,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; public abstract class AbstractXContentTestCase extends ESTestCase { - protected static final int NUMBER_OF_TEST_RUNS = 20; + public static final int NUMBER_OF_TEST_RUNS = 20; public static XContentTester xContentTester( CheckedBiFunction createParser, diff --git a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java index 1819ad7960006..88ae09fbcdc99 100644 --- a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java +++ b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java @@ -192,7 +192,7 @@ private SearchHit[] getSearchHits(String asyncId, String user) throws IOExceptio ) ).getSearchResponse(); try { - return searchResponse.getHits().getHits(); + return searchResponse.getHits().asUnpooled().getHits(); } finally { searchResponse.decRef(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index e8b0041875b07..d819f7d846843 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -334,7 +334,7 @@ public boolean equals(Object obj) { } } - private QueryPage jobsStats; + private final QueryPage jobsStats; public Response(QueryPage jobsStats) { super(Collections.emptyList(), Collections.emptyList()); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java index 94e9033dcca4f..cbede83f15107 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java @@ -303,22 +303,26 @@ private static BytesReference filterSource(FetchSourceContext fetchSourceContext private static SearchResponse createSearchResponse(TopDocs topDocs, SearchHit[] hits) { SearchHits searchHits = new SearchHits(hits, topDocs.totalHits, 0); - return new SearchResponse( - searchHits, - null, - null, - false, - null, - null, - 0, - null, - 1, - 1, - 0, - 1L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); + try { + 
return new SearchResponse( + searchHits, + null, + null, + false, + null, + null, + 0, + null, + 1, + 1, + 0, + 1L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + } finally { + searchHits.decRef(); + } } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index ecb8ce633d985..011b0d09fd8c5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -182,7 +182,8 @@ public static SearchRequest prepareRequest(SearchSourceBuilder source, boolean i } public static List searchHits(SearchResponse response) { - return Arrays.asList(response.getHits().getHits()); + // TODO remove unpooled usage + return Arrays.asList(response.getHits().asUnpooled().getHits()); } /** diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index f391e9bdae84b..7bb6a228f6e48 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -74,14 +74,14 @@ public void query(QueryRequest r, ActionListener l) { } long sortValue = implicitTiebreakerValues.get(ordinal); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues( new Long[] { (long) ordinal, sortValue }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW } ) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); ActionListener.respondAndRelease( l, new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) @@ -94,7 +94,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index eb417570cb4a7..a8ed842e94c44 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -188,7 +188,7 @@ static class EventsAsHits { Map documentFields = new HashMap<>(); documentFields.put(KEY_FIELD_NAME, new DocumentField(KEY_FIELD_NAME, Collections.singletonList(value.v1()))); // save the timestamp both as docId (int) and as id (string) - SearchHit searchHit = new SearchHit(entry.getKey(), entry.getKey().toString()); + SearchHit searchHit = SearchHit.unpooled(entry.getKey(), entry.getKey().toString()); searchHit.addDocumentFields(documentFields, 
Map.of()); hits.add(searchHit); } @@ -215,7 +215,7 @@ public void query(QueryRequest r, ActionListener l) { Map> evs = ordinal != Integer.MAX_VALUE ? events.get(ordinal) : emptyMap(); EventsAsHits eah = new EventsAsHits(evs); - SearchHits searchHits = new SearchHits( + SearchHits searchHits = SearchHits.unpooled( eah.hits.toArray(SearchHits.EMPTY), new TotalHits(eah.hits.size(), Relation.EQUAL_TO), 0.0f @@ -232,7 +232,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 9141555fcd613..b880ec4b06926 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -171,8 +172,8 @@ public void fetchHits(Iterable> refs, ActionListener searchHits = new ArrayList<>(); - searchHits.add(new SearchHit(1, String.valueOf(1))); - searchHits.add(new SearchHit(2, String.valueOf(2))); + searchHits.add(SearchHit.unpooled(1, String.valueOf(1))); + searchHits.add(SearchHit.unpooled(2, String.valueOf(2))); return new Sample(new SequenceKey(randomAlphaOfLength(10)), searchHits); } @@ -224,7 +225,7 @@ void handleSearchRequest(ActionListener asSearchHitsList(Integer... 
docIds) { } List searchHits = new ArrayList<>(docIds.length); for (Integer docId : docIds) { - searchHits.add(new SearchHit(docId, docId.toString())); + searchHits.add(SearchHit.unpooled(docId, docId.toString())); } return searchHits; diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java index b995693458095..f62100a98b066 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/CriterionOrdinalExtractionTests.java @@ -153,7 +153,7 @@ private SearchHit searchHit(Object timeValue, Object tiebreakerValue, Supplier fields = new HashMap<>(); fields.put(tsField, new DocumentField(tsField, singletonList(timeValue))); fields.put(tbField, new DocumentField(tsField, singletonList(tiebreakerValue))); - SearchHit searchHit = new SearchHit(randomInt(), randomAlphaOfLength(10)); + SearchHit searchHit = SearchHit.unpooled(randomInt(), randomAlphaOfLength(10)); searchHit.addDocumentFields(fields, Map.of()); searchHit.sortValues(searchSortValues.get()); diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index 9c9bbfcdc5127..0bdb88592ce0f 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -135,7 +135,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } @@ -236,12 +236,12 @@ protected void @SuppressWarnings("unchecked") void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { int ordinal = searchRequest.source().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 0.0f); SearchResponse response = new SearchResponse( searchHits, null, diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index 7ef2b95d982fb..3097fbbc7f04a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -107,11 +107,11 @@ static class TestQueryClient implements QueryClient { @Override public void query(QueryRequest r, ActionListener l) { int ordinal = 
r.searchSource().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); ActionListener.respondAndRelease( l, new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) @@ -124,7 +124,7 @@ public void fetchHits(Iterable> refs, ActionListener ref : refs) { List hits = new ArrayList<>(ref.size()); for (HitReference hitRef : ref) { - hits.add(new SearchHit(-1, hitRef.id())); + hits.add(SearchHit.unpooled(-1, hitRef.id())); } searchHits.add(hits); } @@ -425,12 +425,12 @@ private class SuccessfulESMockClient extends ESMockClient { @Override void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { int ordinal = searchRequest.source().terminateAfter(); - SearchHit searchHit = new SearchHit(ordinal, String.valueOf(ordinal)); + SearchHit searchHit = SearchHit.unpooled(ordinal, String.valueOf(ordinal)); searchHit.sortValues( new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); - SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); + SearchHits searchHits = SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); SearchResponse response = new SearchResponse( searchHits, null, @@ -477,11 +477,11 @@ void handleSearchRequest(ActionListener void handleSearchRequest(ActionListener li return; } - var hits = searchResponse.getHits().getHits(); - delegate.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(hits, modelId))); + delegate.onResponse(UnparsedModel.unparsedModelFromMap(createModelConfigMap(searchResponse.getHits(), modelId))); }); QueryBuilder queryBuilder = documentIdQuery(modelId); @@ -132,8 +132,7 @@ public void getModel(String modelId, ActionListener listener) { return; } - var hits = searchResponse.getHits().getHits(); - var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); + var modelConfigs = parseHitsAsModels(searchResponse.getHits()).stream().map(UnparsedModel::unparsedModelFromMap).toList(); assert modelConfigs.size() == 1; delegate.onResponse(modelConfigs.get(0)); }); @@ -162,8 +161,7 @@ public void getModelsByTaskType(TaskType taskType, ActionListener> listener) { return; } - var hits = searchResponse.getHits().getHits(); - var modelConfigs = parseHitsAsModels(hits).stream().map(UnparsedModel::unparsedModelFromMap).toList(); + var modelConfigs = parseHitsAsModels(searchResponse.getHits()).stream().map(UnparsedModel::unparsedModelFromMap).toList(); delegate.onResponse(modelConfigs); }); @@ -212,7 +209,7 @@ public void getAllModels(ActionListener> listener) { client.search(modelSearch, searchListener); } - private List parseHitsAsModels(SearchHit[] hits) { + private List parseHitsAsModels(SearchHits hits) { var modelConfigs = new ArrayList(); for (var hit : hits) { modelConfigs.add(new ModelConfigMap(hit.getSourceAsMap(), Map.of())); @@ -220,8 +217,8 @@ private List parseHitsAsModels(SearchHit[] hits) 
{ return modelConfigs; } - private ModelConfigMap createModelConfigMap(SearchHit[] hits, String modelId) { - Map mappedHits = Arrays.stream(hits).collect(Collectors.toMap(hit -> { + private ModelConfigMap createModelConfigMap(SearchHits hits, String modelId) { + Map mappedHits = Arrays.stream(hits.getHits()).collect(Collectors.toMap(hit -> { if (hit.getIndex().startsWith(InferenceIndex.INDEX_NAME)) { return InferenceIndex.INDEX_NAME; } diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index f75dd2926059a..f595153e4d6dd 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -222,18 +222,18 @@ protected void } private SearchHits prepareSearchHits() { - SearchHit hit1 = new SearchHit(0, "1"); + SearchHit hit1 = SearchHit.unpooled(0, "1"); hit1.score(1f); hit1.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - SearchHit hit2 = new SearchHit(0, "2"); + SearchHit hit2 = SearchHit.unpooled(0, "2"); hit2.score(1f); hit2.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - SearchHit hit3 = new SearchHit(0, "3*"); + SearchHit hit3 = SearchHit.unpooled(0, "3*"); hit3.score(1f); hit3.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - return new SearchHits(new SearchHit[] { hit1, hit2, hit3 }, new TotalHits(3L, TotalHits.Relation.EQUAL_TO), 1f); + return SearchHits.unpooled(new SearchHit[] { hit1, hit2, hit3 }, new TotalHits(3L, TotalHits.Relation.EQUAL_TO), 1f); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java index a5c47524b6934..f28f6eff25b04 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ModelSnapshotRetentionIT.java @@ -51,7 +51,6 @@ import static org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; public class ModelSnapshotRetentionIT extends MlNativeAutodetectIntegTestCase { @@ -191,8 +190,7 @@ private List getAvailableModelStateDocIds() throws Exception { private List getDocIdsFromSearch(SearchRequest searchRequest) throws Exception { List docIds = new ArrayList<>(); assertResponse(client().execute(TransportSearchAction.TYPE, searchRequest), searchResponse -> { - assertThat(searchResponse.getHits(), notNullValue()); - for (SearchHit searchHit : searchResponse.getHits().getHits()) { + for (SearchHit searchHit : searchResponse.getHits()) { docIds.add(searchHit.getId()); } }); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java 
index 4cfcf6509faa0..be8a098ed3986 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.sort.SortOrder; @@ -126,8 +127,7 @@ protected InputStream initScroll(long startTimestamp) throws IOException { logger.debug("[{}] Search response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); scrollId = searchResponse.getScrollId(); - SearchHit hits[] = searchResponse.getHits().getHits(); - return processAndConsumeSearchHits(hits); + return processAndConsumeSearchHits(searchResponse.getHits()); } finally { searchResponse.decRef(); } @@ -184,9 +184,9 @@ private SearchRequestBuilder buildSearchRequest(long start) { /** * IMPORTANT: This is not an idempotent method. This method changes the input array by setting each element to null. */ - private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOException { + private InputStream processAndConsumeSearchHits(SearchHits hits) throws IOException { - if (hits == null || hits.length == 0) { + if (hits.getHits().length == 0) { hasNext = false; clearScroll(); return null; @@ -194,11 +194,10 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep BytesStreamOutput outputStream = new BytesStreamOutput(); - SearchHit lastHit = hits[hits.length - 1]; + SearchHit lastHit = hits.getAt(hits.getHits().length - 1); lastTimestamp = context.extractedFields.timeFieldValue(lastHit); try (SearchHitToJsonProcessor hitProcessor = new SearchHitToJsonProcessor(context.extractedFields, outputStream)) { - for (int i = 0; i < hits.length; i++) { - SearchHit hit = hits[i]; + for (SearchHit hit : hits) { if (isCancelled) { Long timestamp = context.extractedFields.timeFieldValue(hit); if (timestamp != null) { @@ -212,9 +211,6 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep } } hitProcessor.process(hit); - // hack to remove the reference from object. This object can be big and consume alot of memory. - // We are removing it as soon as we process it. 
- hits[i] = null; } } return outputStream.bytes().streamInput(); @@ -237,8 +233,7 @@ private InputStream continueScroll() throws IOException { logger.debug("[{}] Search response was obtained", context.jobId); timingStatsReporter.reportSearchDuration(searchResponse.getTook()); scrollId = searchResponse.getScrollId(); - SearchHit hits[] = searchResponse.getHits().getHits(); - return processAndConsumeSearchHits(hits); + return processAndConsumeSearchHits(searchResponse.getHits()); } finally { if (searchResponse != null) { searchResponse.decRef(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java index 4119b23747fcb..c890ab599c380 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.ClientHelper; @@ -154,11 +155,11 @@ public void preview(ActionListener> listener) { return; } - final SearchHit[] hits = searchResponse.getHits().getHits(); - List rows = new ArrayList<>(hits.length); - for (SearchHit hit : hits) { - String[] extractedValues = extractValues(hit); - rows.add(extractedValues == null ? new Row(null, hit, true) : new Row(extractedValues, hit, false)); + List rows = new ArrayList<>(searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + var unpooled = hit.asUnpooled(); + String[] extractedValues = extractValues(unpooled); + rows.add(extractedValues == null ? 
new Row(null, unpooled, true) : new Row(extractedValues, unpooled, false)); } delegate.onResponse(rows); }) @@ -251,8 +252,8 @@ private List processSearchResponse(SearchResponse searchResponse) { return null; } - SearchHit[] hits = searchResponse.getHits().getHits(); - List rows = new ArrayList<>(hits.length); + SearchHits hits = searchResponse.getHits(); + List rows = new ArrayList<>(hits.getHits().length); for (SearchHit hit : hits) { if (isCancelled) { hasNext = false; @@ -317,12 +318,13 @@ private String[] extractProcessedValue(ProcessedField processedField, SearchHit } private Row createRow(SearchHit hit) { - String[] extractedValues = extractValues(hit); + var unpooled = hit.asUnpooled(); + String[] extractedValues = extractValues(unpooled); if (extractedValues == null) { - return new Row(null, hit, true); + return new Row(null, unpooled, true); } boolean isTraining = trainTestSplitter.get().isTraining(extractedValues); - Row row = new Row(extractedValues, hit, isTraining); + Row row = new Row(extractedValues, unpooled, isTraining); LOGGER.trace( () -> format( "[%s] Extracted row: sort key = [%s], is_training = [%s], values = %s", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java index bd37706622187..9e2db58befdbf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/TestDocsIterator.java @@ -61,7 +61,7 @@ protected FieldSortBuilder sortField() { @Override protected SearchHit map(SearchHit hit) { - return hit; + return hit.asUnpooled(); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index dd6d498b425d5..b502e0d6db341 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -52,6 +52,7 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Sum; @@ -663,7 +664,7 @@ public void getTrainedModel( ActionListener trainedModelSearchHandler = ActionListener.wrap(modelSearchResponse -> { TrainedModelConfig.Builder builder; try { - builder = handleHits(modelSearchResponse.getHits().getHits(), modelId, this::parseModelConfigLenientlyFromSource).get(0); + builder = handleHits(modelSearchResponse.getHits(), modelId, this::parseModelConfigLenientlyFromSource).get(0); } catch (ResourceNotFoundException ex) { getTrainedModelListener.onFailure( new ResourceNotFoundException(Messages.getMessage(Messages.INFERENCE_NOT_FOUND, modelId)) @@ -701,7 +702,7 @@ public void getTrainedModel( ActionListener.wrap(definitionSearchResponse -> { try { List docs = handleHits( - definitionSearchResponse.getHits().getHits(), + definitionSearchResponse.getHits(), modelId, (bytes, resourceId) -> 
ChunkedTrainedModelRestorer.parseModelDefinitionDocLenientlyFromSource( bytes, @@ -1268,15 +1269,15 @@ private static Set matchedResourceIds(String[] tokens) { } private static List handleHits( - SearchHit[] hits, + SearchHits hits, String resourceId, CheckedBiFunction parseLeniently ) throws Exception { - if (hits.length == 0) { + if (hits.getHits().length == 0) { throw new ResourceNotFoundException(resourceId); } - List results = new ArrayList<>(hits.length); - String initialIndex = hits[0].getIndex(); + List results = new ArrayList<>(hits.getHits().length); + String initialIndex = hits.getAt(0).getIndex(); for (SearchHit hit : hits) { // We don't want to spread across multiple backing indices if (hit.getIndex().equals(initialIndex)) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java index 92ceb536cfd43..29a8a35ff0fdd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastActionTests.java @@ -86,7 +86,7 @@ private static SearchHit createForecastStatsHit(ForecastRequestStats.ForecastReq ForecastRequestStats.STATUS.getPreferredName(), new DocumentField(ForecastRequestStats.STATUS.getPreferredName(), Collections.singletonList(status.toString())) ); - SearchHit hit = new SearchHit(0, ""); + SearchHit hit = SearchHit.unpooled(0, ""); hit.addDocumentFields(documentFields, Map.of()); return hit; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java index 12ce45a186d62..4bbaafa9db0cd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractorTests.java @@ -555,7 +555,8 @@ private SearchResponse createSearchResponse(long totalHits, long earliestTime, l SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.status()).thenReturn(RestStatus.OK); SearchHit[] hits = new SearchHit[(int) totalHits]; - SearchHits searchHits = new SearchHits(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1); + Arrays.fill(hits, SearchHit.unpooled(1)); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1); when(searchResponse.getHits()).thenReturn(searchHits); List aggs = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java index bf7aa465ee604..2dd17e434cccb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorTests.java @@ -547,7 +547,8 @@ private SearchResponse createSearchResponse(List timestamps, List hits.add(hit); } SearchHits searchHits = new SearchHits(hits.toArray(SearchHits.EMPTY), new TotalHits(hits.size(), TotalHits.Relation.EQUAL_TO), 
1); - when(searchResponse.getHits()).thenReturn(searchHits); + when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled()); + searchHits.decRef(); when(searchResponse.getTook()).thenReturn(TimeValue.timeValueMillis(randomNonNegativeLong())); return searchResponse; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java index 63afc4ef6659c..8d8cded819e23 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsTaskTests.java @@ -243,10 +243,12 @@ public void testPersistProgress_ProgressDocumentCreated() throws IOException { } public void testPersistProgress_ProgressDocumentUpdated() throws IOException { - testPersistProgress( - new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f), - ".ml-state-dummy" - ); + var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f); + try { + testPersistProgress(hits, ".ml-state-dummy"); + } finally { + hits.decRef(); + } } public void testSetFailed() throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java index 7bc3d507ecf22..993e00bd4adf4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractorTests.java @@ -654,7 +654,8 @@ private SearchResponse createSearchResponse(List field1Values, List buildSearchHits(List> vals) { - return vals.stream() - .map(InferenceRunnerTests::fromMap) - .map(reference -> SearchHit.createFromMap(Collections.singletonMap("_source", reference))) - .collect(Collectors.toCollection(ArrayDeque::new)); + return vals.stream().map(InferenceRunnerTests::fromMap).map(reference -> { + var pooled = SearchHit.createFromMap(Collections.singletonMap("_source", reference)); + try { + return pooled.asUnpooled(); + } finally { + pooled.decRef(); + } + }).collect(Collectors.toCollection(ArrayDeque::new)); } private static BytesReference fromMap(Map map) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java index 99dfd9e919a6a..3a95a3bb65f10 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameRowsJoinerTests.java @@ -309,7 +309,7 @@ private void givenDataFrameBatches(List> batche } private static SearchHit newHit(String json) { - SearchHit hit = new SearchHit(randomInt(), randomAlphaOfLength(10)); + SearchHit hit = SearchHit.unpooled(randomInt(), randomAlphaOfLength(10)); hit.sourceRef(new BytesArray(json)); return hit; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index baae42b99640f..db81fc2db3348 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -358,10 +358,12 @@ public void testPersistQuantilesSync_QuantilesDocumentCreated() { } public void testPersistQuantilesSync_QuantilesDocumentUpdated() { - testPersistQuantilesSync( - new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f), - ".ml-state-dummy" - ); + var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f); + try { + testPersistQuantilesSync(hits, ".ml-state-dummy"); + } finally { + hits.decRef(); + } } @SuppressWarnings("unchecked") @@ -397,10 +399,12 @@ public void testPersistQuantilesAsync_QuantilesDocumentCreated() { } public void testPersistQuantilesAsync_QuantilesDocumentUpdated() { - testPersistQuantilesAsync( - new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f), - ".ml-state-dummy" - ); + var hits = new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f); + try { + testPersistQuantilesAsync(hits, ".ml-state-dummy"); + } finally { + hits.decRef(); + } } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java index 8179a97955a57..3dcbbeb3fcce5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProviderTests.java @@ -928,7 +928,8 @@ private static SearchResponse createSearchResponse(List> sou list.add(hit); } SearchHits hits = new SearchHits(list.toArray(SearchHits.EMPTY), new TotalHits(source.size(), TotalHits.Relation.EQUAL_TO), 1); - when(response.getHits()).thenReturn(hits); + when(response.getHits()).thenReturn(hits.asUnpooled()); + hits.decRef(); return response; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java index 59a79def9bd10..33e5582ec992a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/MockClientBuilder.java @@ -137,7 +137,8 @@ public MockClientBuilder prepareSearch(String indexName, List do SearchResponse response = mock(SearchResponse.class); SearchHits searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0.0f); - when(response.getHits()).thenReturn(searchHits); + when(response.getHits()).thenReturn(searchHits.asUnpooled()); + searchHits.decRef(); doAnswer(new Answer() { @Override @@ -176,7 +177,8 @@ public MockClientBuilder prepareSearchFields(String indexName, List() { @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java 
index 90280bc08de17..47f7d8c65a27a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamerTests.java @@ -107,7 +107,8 @@ private static SearchResponse createSearchResponse(List> sou hits[i++] = hit; } SearchHits searchHits = new SearchHits(hits, null, (float) 0.0); - when(searchResponse.getHits()).thenReturn(searchHits); + when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled()); + searchHits.decRef(); return searchResponse; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java index 3048a1144ac55..6ec43ca2a3201 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemoverTests.java @@ -97,7 +97,8 @@ static SearchResponse createSearchResponseFromHits(List hits) { 1.0f ); SearchResponse searchResponse = mock(SearchResponse.class); - when(searchResponse.getHits()).thenReturn(searchHits); + when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled()); + searchHits.decRef(); return searchResponse; } @@ -111,7 +112,8 @@ private static SearchResponse createSearchResponse(List to } SearchHits hits = new SearchHits(hitsArray, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 1.0f); SearchResponse searchResponse = mock(SearchResponse.class); - when(searchResponse.getHits()).thenReturn(hits); + when(searchResponse.getHits()).thenReturn(hits.asUnpooled()); + hits.decRef(); return searchResponse; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java index 520efd5e77244..a7ba148584637 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessorTests.java @@ -124,7 +124,7 @@ public void testStateRead_StateDocumentCreated() throws IOException { public void testStateRead_StateDocumentUpdated() throws IOException { testStateRead( - new SearchHits(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.createFromMap(Map.of("_index", ".ml-state-dummy")) }, null, 0.0f), ".ml-state-dummy" ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java index f2affbe6d2869..59a3b86ef0bd5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/test/SearchHitBuilder.java @@ -22,7 +22,7 @@ public class SearchHitBuilder { private final SearchHit hit; public SearchHitBuilder(int docId) { - hit = new SearchHit(docId, null); + hit = SearchHit.unpooled(docId, null); } public SearchHitBuilder addField(String name, Object value) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java index 4f1308e9295c2..4fded8ef8d05d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIteratorTests.java @@ -172,7 +172,8 @@ protected SearchResponse createSearchResponseWithHits(String... hits) { SearchHits searchHits = createHits(hits); SearchResponse searchResponse = mock(SearchResponse.class); when(searchResponse.getScrollId()).thenReturn(SCROLL_ID); - when(searchResponse.getHits()).thenReturn(searchHits); + when(searchResponse.getHits()).thenReturn(searchHits.asUnpooled()); + searchHits.decRef(); return searchResponse; } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java index e481cf70b9afe..79cf0cb9f7987 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -81,7 +81,6 @@ public void testFetchAllByEntityWithBrokenScroll() { request.scroll(TimeValue.timeValueHours(10L)); String scrollId = randomAlphaOfLength(5); - SearchHit[] hits = new SearchHit[] { new SearchHit(1), new SearchHit(2) }; Answer returnResponse = invocation -> { @SuppressWarnings("unchecked") @@ -89,7 +88,11 @@ public void testFetchAllByEntityWithBrokenScroll() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), + SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(1), SearchHit.unpooled(2) }, + new TotalHits(3, TotalHits.Relation.EQUAL_TO), + 1 + ), null, null, false, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3386c1d7930b5..4127b8cdad32b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -197,25 +197,30 @@ protected void SearchRequest searchRequest = (SearchRequest) request; searchRequests.add(searchRequest); final SearchHit[] hits = searchFunction.apply(searchRequest); - ActionListener.respondAndRelease( - listener, - (Response) new SearchResponse( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1, - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ) - ); + final var searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f); + try { + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( + searchHits, + null, + null, + false, + false, + null, + 1, + "_scrollId1", + 1, + 1, + 0, + 1, + null, + null + ) + ); + } finally { + searchHits.decRef(); + } } else if (TransportSearchScrollAction.TYPE.name().equals(action.name())) { assertThat(request, 
instanceOf(SearchScrollRequest.class)); ActionListener.respondAndRelease( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index b921fef9fd917..ac11dee8d4a48 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -405,7 +405,7 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn(new SearchRequestBuilder(client)); doAnswer(invocation -> { final var listener = (ActionListener) invocation.getArguments()[1]; - final var searchHit = new SearchHit(docId, apiKeyId); + final var searchHit = SearchHit.unpooled(docId, apiKeyId); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.map(buildApiKeySourceDoc("some_hash".toCharArray())); searchHit.sourceRef(BytesReference.bytes(builder)); @@ -413,7 +413,7 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( + SearchHits.unpooled( new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), randomFloat(), @@ -758,7 +758,7 @@ public void testCrossClusterApiKeyUsageStats() { ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( + SearchHits.unpooled( searchHits.toArray(SearchHit[]::new), new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), randomFloat(), @@ -825,7 +825,7 @@ private SearchHit searchHitForCrossClusterApiKey(int crossClusterAccessLevel) { }; final int docId = randomIntBetween(0, Integer.MAX_VALUE); final String apiKeyId = randomAlphaOfLength(20); - final var searchHit = new SearchHit(docId, apiKeyId); + final var searchHit = SearchHit.unpooled(docId, apiKeyId); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.map(XContentHelper.convertToMap(JsonXContent.jsonXContent, Strings.format(""" { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index f2cb09a2c9d3d..adf0b44266260 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -1237,9 +1237,9 @@ private void mockTokenForRefreshToken( assertThat(refreshFilter.fieldName(), is("refresh_token.token")); final SearchHits hits; if (storedRefreshToken.equals(refreshFilter.value())) { - SearchHit hit = new SearchHit(randomInt(), "token_" + userToken.getId()); + SearchHit hit = SearchHit.unpooled(randomInt(), "token_" + userToken.getId()); hit.sourceRef(docSource); - hits = new SearchHits(new SearchHit[] { hit }, null, 1); + hits = SearchHits.unpooled(new SearchHit[] { hit }, null, 1); } else { hits = SearchHits.EMPTY_WITH_TOTAL_HITS; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index 3a9fee4288bf2..33d3e6783b9e6 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -263,7 +263,7 @@ public void testFindTokensFor() { if (r instanceof SearchRequest) { final SearchHit[] hits = IntStream.range(0, nhits) .mapToObj( - i -> new SearchHit( + i -> SearchHit.unpooled( randomIntBetween(0, Integer.MAX_VALUE), SERVICE_ACCOUNT_TOKEN_DOC_TYPE + "-" + accountId.asPrincipal() + "/" + tokenNames[i] ) @@ -272,7 +272,7 @@ public void testFindTokensFor() { ActionListener.respondAndRelease( l, new SearchResponse( - new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), + SearchHits.unpooled(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), null, null, false, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 169275ccc3ee3..a0008ba632151 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -346,7 +346,7 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi doAnswer(invocation -> { @SuppressWarnings("unchecked") final var listener = (ActionListener) invocation.getArguments()[1]; - final var searchHit = new SearchHit( + final var searchHit = SearchHit.unpooled( randomIntBetween(0, Integer.MAX_VALUE), NativeRoleMappingStore.getIdForName(mapping.getName()) ); @@ -357,14 +357,7 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), - null, - null, - null - ), + SearchHits.unpooled(new SearchHit[] { searchHit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), randomFloat()), null, null, false, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 0c2f9cefbcffb..ed1b5e6c7668b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -818,22 +818,12 @@ private SearchHit[] buildHits(List sourcePrivile } private static SearchResponse buildSearchResponse(SearchHit[] hits) { - return new SearchResponse( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1, - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ); + var searchHits = new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f); + try { + return new SearchResponse(searchHits.asUnpooled(), null, null, false, false, null, 1, "_scrollId1", 1, 1, 0, 1, null, null); + } finally { + searchHits.decRef(); + } } private void handleBulkRequest(int 
expectedCount, Predicate> isCreated) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java index 36a42aaad7161..8fa41017762a7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java @@ -156,14 +156,12 @@ static void handle( logSearchResponse(response, log); } - SearchHit[] hits = response.getHits().getHits(); - SearchHitRowSet rowSet = makeRowSet.get(); if (rowSet.hasRemaining() == false) { closePointInTime(client, response.pointInTimeId(), listener.delegateFailureAndWrap((l, r) -> l.onResponse(Page.last(rowSet)))); } else { - updateSearchAfter(hits, source); + updateSearchAfter(response.getHits().getHits(), source); SearchHitCursor nextCursor = new SearchHitCursor( source, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java index ba6a9854e4254..b6e3e8b759352 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitRowSet.java @@ -28,9 +28,8 @@ * Extracts rows from an array of {@link SearchHit}. */ class SearchHitRowSet extends ResultRowSet { - private final SearchHit[] hits; + private final SearchHits hits; private final Map> flatInnerHits = new HashMap<>(); - private final Set innerHits = new LinkedHashSet<>(); private final String innerHit; private final int size; @@ -42,13 +41,14 @@ class SearchHitRowSet extends ResultRowSet { SearchHitRowSet(List exts, BitSet mask, int sizeRequested, int limit, SearchResponse response) { super(exts, mask); - this.hits = response.getHits().getHits(); + this.hits = response.getHits().asUnpooled(); // Since the results might contain nested docs, the iteration is similar to that of Aggregation // namely it discovers the nested docs and then, for iteration, increments the deepest level first // and eventually carries that over to the top level String innerHit = null; + Set innerHits = new LinkedHashSet<>(); for (HitExtractor ex : exts) { if (ex.hitName() != null) { innerHits.add(ex.hitName()); @@ -58,7 +58,7 @@ class SearchHitRowSet extends ResultRowSet { } } - int sz = hits.length; + int sz = hits.getHits().length; int maxDepth = 0; if (innerHits.isEmpty() == false) { @@ -106,7 +106,7 @@ protected Object extractValue(HitExtractor e) { int extractorLevel = e.hitName() == null ? 
0 : 1; SearchHit hit = null; - SearchHit[] sh = hits; + SearchHit[] sh = hits.getHits(); for (int lvl = 0; lvl <= extractorLevel; lvl++) { // TODO: add support for multi-nested doc if (hit != null) { @@ -172,7 +172,7 @@ protected boolean doNext() { // increment last row indexPerLevel[indexPerLevel.length - 1]++; // then check size - SearchHit[] sh = hits; + SearchHit[] sh = hits.getHits(); for (int lvl = 0; lvl < indexPerLevel.length; lvl++) { if (indexPerLevel[lvl] == sh.length) { // reset the current branch @@ -181,7 +181,7 @@ protected boolean doNext() { indexPerLevel[lvl - 1]++; // restart the loop lvl = 0; - sh = hits; + sh = hits.getHits(); } else { SearchHit h = sh[indexPerLevel[lvl]]; // TODO: improve this for multi-nested responses diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java index d06a239e61ce7..112be29d2dcd8 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ComputingExtractorTests.java @@ -82,7 +82,7 @@ public void testGet() { double value = randomDouble(); double expected = Math.log(value); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); assertEquals(expected, extractor.process(hit)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java index 5c3fc378d90c1..b951f96e8b933 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/FieldHitExtractorTests.java @@ -94,7 +94,7 @@ public void testGetDottedValueWithDocValues() { } DocumentField field = new DocumentField(fieldName, documentFieldValues); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Object result = documentFieldValues.isEmpty() ? null : documentFieldValues.get(0); assertEquals(result, extractor.extract(hit)); @@ -112,7 +112,7 @@ public void testGetDocValue() { documentFieldValues.add(randomValue()); } DocumentField field = new DocumentField(fieldName, documentFieldValues); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Object result = documentFieldValues.isEmpty() ? 
null : documentFieldValues.get(0); assertEquals(result, extractor.extract(hit)); @@ -127,7 +127,7 @@ public void testGetDate() { ZonedDateTime zdt = DateUtils.asDateTimeWithMillis(millis, zoneId).plusNanos(nanosOnly); List documentFieldValues = Collections.singletonList(StringUtils.toString(zdt)); DocumentField field = new DocumentField("my_date_nanos_field", documentFieldValues); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("my_date_nanos_field", field); FieldHitExtractor extractor = new FieldHitExtractor("my_date_nanos_field", DATETIME, zoneId, LENIENT); assertEquals(zdt, extractor.extract(hit)); @@ -144,7 +144,7 @@ public void testMultiValuedDocValue() { String fieldName = randomAlphaOfLength(5); FieldHitExtractor fe = getFieldHitExtractor(fieldName); DocumentField field = new DocumentField(fieldName, asList("a", "b")); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); @@ -154,7 +154,7 @@ public void testExtractSourcePath() { FieldHitExtractor fe = getFieldHitExtractor("a.b.c"); Object value = randomValue(); DocumentField field = new DocumentField("a.b.c", singletonList(value)); - SearchHit hit = new SearchHit(1, null, null); + SearchHit hit = SearchHit.unpooled(1, null, null); hit.setDocumentField("a.b.c", field); assertThat(fe.extract(hit), is(value)); } @@ -163,7 +163,7 @@ public void testMultiValuedSource() { FieldHitExtractor fe = getFieldHitExtractor("a"); Object value = randomValue(); DocumentField field = new DocumentField("a", asList(value, value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("a", field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [a]) are not supported")); @@ -174,7 +174,7 @@ public void testMultiValuedSourceAllowed() { Object valueA = randomValue(); Object valueB = randomValue(); DocumentField field = new DocumentField("a", asList(valueA, valueB)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField("a", field); assertEquals(valueA, fe.extract(hit)); } @@ -187,7 +187,7 @@ public void testGeoShapeExtraction() { map.put("coordinates", asList(1d, 2d)); map.put("type", "Point"); DocumentField field = new DocumentField(fieldName, singletonList(map)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); assertEquals(new GeoShape(1, 2), fe.extract(hit)); @@ -204,14 +204,14 @@ public void testMultipleGeoShapeExtraction() { map2.put("coordinates", asList(3d, 4d)); map2.put("type", "Point"); DocumentField field = new DocumentField(fieldName, asList(map1, map2)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); Exception ex = expectThrows(InvalidArgumentException.class, () -> fe.extract(hit)); assertThat(ex.getMessage(), is("Arrays (returned by [" + fieldName + "]) are not supported")); FieldHitExtractor lenientFe = new FieldHitExtractor(fieldName, randomBoolean() ? 
GEO_SHAPE : SHAPE, UTC, LENIENT); - SearchHit searchHit = new SearchHit(1, "1"); + SearchHit searchHit = SearchHit.unpooled(1, "1"); searchHit.setDocumentField(fieldName, new DocumentField(fieldName, singletonList(map2))); assertEquals(new GeoShape(3, 4), lenientFe.extract(searchHit)); } @@ -223,7 +223,7 @@ public void testUnsignedLongExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); FieldHitExtractor fe = new FieldHitExtractor(fieldName, UNSIGNED_LONG, randomZone(), randomBoolean() ? NONE : LENIENT); @@ -237,7 +237,7 @@ public void testVersionExtraction() { String fieldName = randomAlphaOfLength(10); DocumentField field = new DocumentField(fieldName, singletonList(value)); - SearchHit hit = new SearchHit(1, null); + SearchHit hit = SearchHit.unpooled(1, null); hit.setDocumentField(fieldName, field); FieldHitExtractor fe = new FieldHitExtractor(fieldName, VERSION, randomZone(), randomBoolean() ? NONE : LENIENT); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java index fdce6cbcf0c2f..5d007218aeeb1 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/ScoreExtractorTests.java @@ -14,7 +14,7 @@ public void testGet() { int times = between(1, 1000); for (int i = 0; i < times; i++) { float score = randomFloat(); - SearchHit hit = new SearchHit(1); + SearchHit hit = SearchHit.unpooled(1); hit.score(score); assertEquals(score, ScoreExtractor.INSTANCE.extract(hit)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java index b7f123f82cf98..9e83df706a77b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TopHitsAggExtractorTests.java @@ -115,7 +115,7 @@ public void testExtractUnsignedLong() { private SearchHits searchHitsOf(Object value) { TotalHits totalHits = new TotalHits(10, TotalHits.Relation.EQUAL_TO); - SearchHit searchHit = new SearchHit(1, "docId"); + SearchHit searchHit = SearchHit.unpooled(1, "docId"); searchHit.addDocumentFields( Collections.singletonMap("topHitsAgg", new DocumentField("field", Collections.singletonList(value))), Collections.singletonMap( @@ -123,6 +123,6 @@ private SearchHits searchHitsOf(Object value) { new DocumentField("_ignored", Collections.singletonList(randomValueOtherThan(value, () -> randomAlphaOfLength(5)))) ) ); - return new SearchHits(new SearchHit[] { searchHit }, totalHits, 0.0f); + return SearchHits.unpooled(new SearchHit[] { searchHit }, totalHits, 0.0f); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java index 
d76b6b67368f9..69139bc3f7561 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/SeqNoPrimaryTermAndIndexTests.java @@ -29,7 +29,7 @@ public void testEquals() { } public void testFromSearchHit() { - SearchHit searchHit = new SearchHit(1); + SearchHit searchHit = SearchHit.unpooled(1); long seqNo = randomLongBetween(-2, 10_000); long primaryTerm = randomLongBetween(-2, 10_000); String index = randomAlphaOfLength(10); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 8ee7e902285c9..fa8e867d77a49 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -544,7 +544,11 @@ protected void ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled( + new SearchHit[] { SearchHit.unpooled(1) }, + new TotalHits(1L, TotalHits.Relation.EQUAL_TO), + 1.0f + ), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index 5dee74cccee7a..a18c926e21da6 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -513,7 +513,7 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), @@ -606,7 +606,7 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), @@ -702,7 +702,7 @@ public void testFailureCounterIsResetOnSuccess() throws Exception { ); final SearchResponse searchResponse = new SearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { SearchHit.unpooled(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), // Simulate completely null aggs null, new Suggest(Collections.emptyList()), diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java index 708cb3d93cbed..512fd7a2383a1 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -112,7 +113,7 @@ public void testTermsFieldCollector() throws IOException { Aggregations aggs = new Aggregations(Collections.singletonList(composite)); SearchResponse response = new SearchResponse( - null, + SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, false, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java index dab6d8518d28f..fd4e60e485200 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; @@ -171,7 +172,7 @@ private static QueryBuilder buildFilterQuery(ChangeCollector collector) { private static SearchResponse buildSearchResponse(SingleValue minTimestamp, SingleValue maxTimestamp) { return new SearchResponse( - null, + SearchHits.EMPTY_WITH_TOTAL_HITS, new Aggregations(Arrays.asList(minTimestamp, maxTimestamp)), null, false, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 67f923769ffe3..be0bb177267bc 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; @@ 
-19,6 +20,7 @@ import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.search.SearchHits; @@ -39,6 +41,8 @@ import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfigTests; import org.elasticsearch.xpack.core.transform.transforms.SourceConfig; +import org.elasticsearch.xpack.core.transform.transforms.TransformIndexerStats; +import org.elasticsearch.xpack.core.transform.transforms.TransformProgress; import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfigTests; import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; @@ -239,7 +243,30 @@ public void testProcessSearchResponse() { SettingsConfigTests.randomSettingsConfig(), TransformConfigVersion.CURRENT, Collections.emptySet() - ); + ) { + @Override + public Tuple, Map> processSearchResponse( + SearchResponse searchResponse, + String destinationIndex, + String destinationPipeline, + Map fieldTypeMap, + TransformIndexerStats stats, + TransformProgress progress + ) { + try { + return super.processSearchResponse( + searchResponse, + destinationIndex, + destinationPipeline, + fieldTypeMap, + stats, + progress + ); + } finally { + searchResponse.decRef(); + } + } + }; Aggregations aggs = null; assertThat(pivot.processSearchResponse(searchResponseFromAggs(aggs), null, null, null, null, null), is(nullValue())); @@ -324,7 +351,22 @@ public void testPreviewForCompositeAggregation() throws Exception { } private static SearchResponse searchResponseFromAggs(Aggregations aggs) { - return new SearchResponse(null, aggs, null, false, null, null, 1, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + aggs, + null, + false, + null, + null, + 1, + null, + 10, + 5, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ); } private class MyMockClient extends NoOpClient { diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java index f02b3f865adf0..d97b0bd81a101 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -100,12 +100,12 @@ public void testExecuteWithAggs() { public void testExecuteAccessHits() throws Exception { CompareCondition condition = new CompareCondition("ctx.payload.hits.hits.0._score", CompareCondition.Op.EQ, 1, Clock.systemUTC()); - SearchHit hit = new SearchHit(0, "1"); + SearchHit hit = SearchHit.unpooled(0, "1"); hit.score(1f); hit.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); SearchResponse response = new SearchResponse( - new SearchHits(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f), + SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f), null, null, false, diff --git 
a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java index b82622fbd4819..67835971cd15a 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/history/HistoryActionConditionTests.java @@ -180,7 +180,7 @@ public void testActionConditionWithFailures() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); - searchHitReference.set(response.getHits().getAt(0)); + searchHitReference.set(response.getHits().getAt(0).asUnpooled()); } finally { response.decRef(); } @@ -240,7 +240,7 @@ public void testActionCondition() throws Exception { final SearchResponse response = searchHistory(SearchSourceBuilder.searchSource().query(termQuery("watch_id", id))); try { assertThat(response.getHits().getTotalHits().value, is(oneOf(1L, 2L))); - searchHitReference.set(response.getHits().getAt(0)); + searchHitReference.set(response.getHits().getAt(0).asUnpooled()); } finally { response.decRef(); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 59e646654a18c..19bac967c576a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -196,7 +196,7 @@ void stopExecutor() {} SearchHit[] hits = new SearchHit[count]; for (int i = 0; i < count; i++) { String id = String.valueOf(i); - SearchHit hit = new SearchHit(1, id); + SearchHit hit = SearchHit.unpooled(1, id); hit.version(1L); hit.shard(new SearchShardTarget("nodeId", new ShardId(watchIndex, 0), "whatever")); hits[i] = hit; @@ -212,7 +212,7 @@ void stopExecutor() {} when(watch.status()).thenReturn(watchStatus); when(parser.parse(eq(id), eq(true), any(), eq(XContentType.JSON), anyLong(), anyLong())).thenReturn(watch); } - SearchHits searchHits = new SearchHits(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f); + SearchHits searchHits = SearchHits.unpooled(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f); doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[2]; ActionListener.respondAndRelease( diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index ee200dd7912c5..b75ac51c3510f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -210,14 +210,14 @@ public void testFindTriggeredWatchesGoodCase() { SearchResponse searchResponse1 = mock(SearchResponse.class); when(searchResponse1.getSuccessfulShards()).thenReturn(1); when(searchResponse1.getTotalShards()).thenReturn(1); - final BytesArray source = new BytesArray("{}"); + 
BytesArray source = new BytesArray("{}"); { - final SearchHit hit = new SearchHit(0, "first_foo"); + SearchHit hit = SearchHit.unpooled(0, "first_foo"); hit.version(1L); hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); hit.sourceRef(source); when(searchResponse1.getHits()).thenReturn( - new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f) + SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f) ); } when(searchResponse1.getScrollId()).thenReturn("_scrollId"); @@ -228,20 +228,20 @@ public void testFindTriggeredWatchesGoodCase() { return null; }).when(client).execute(eq(TransportSearchAction.TYPE), any(), any()); - // First return a scroll response with a single hit and then with no hits doAnswer(invocation -> { SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1]; @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[2]; if (request.scrollId().equals("_scrollId")) { - final var hit2 = new SearchHit(0, "second_foo"); - hit2.version(1L); - hit2.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); - hit2.sourceRef(source); + // First return a scroll response with a single hit and then with no hits + var hit = SearchHit.unpooled(0, "second_foo"); + hit.version(1L); + hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); + hit.sourceRef(source); ActionListener.respondAndRelease( listener, new SearchResponse( - new SearchHits(new SearchHit[] { hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f), + SearchHits.unpooled(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f), null, null, false, From b198f3b4b467a94b9e1d58c7e49ecb9a0b3640ba Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 17 Jan 2024 16:30:03 +0100 Subject: [PATCH 82/95] Remove redundant ResizeResponse (#104464) `ResizeResponse` is the same as `CreateIndexResponse`, both in code and over the wire so it can go away. 
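
As a minimal sketch of the caller-side effect (illustration only; the
`client` handle and the surrounding call site are assumed, not part of
this change), a resize now completes with the shared response type:

    // Assumed imports: org.elasticsearch.action.ActionListener,
    // org.elasticsearch.action.admin.indices.create.CreateIndexResponse,
    // org.elasticsearch.action.admin.indices.shrink.ResizeRequest
    ResizeRequest request = new ResizeRequest("target_index", "source_index");
    client.admin().indices().resizeIndex(request, new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse response) {
            // Same wire payload the removed ResizeResponse carried:
            // acknowledged, shards_acknowledged and the target index name.
            boolean acknowledged = response.isAcknowledged();
            boolean shardsAcknowledged = response.isShardsAcknowledged();
            String index = response.index();
        }

        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    });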
--- .../indices/create/CreateIndexResponse.java | 2 +- .../admin/indices/shrink/ResizeAction.java | 5 +- .../admin/indices/shrink/ResizeRequest.java | 3 +- .../indices/shrink/ResizeRequestBuilder.java | 5 +- .../admin/indices/shrink/ResizeResponse.java | 44 ------------- .../admin/indices/shrink/ShrinkAction.java | 5 +- .../indices/shrink/TransportResizeAction.java | 13 ++-- .../client/internal/IndicesAdminClient.java | 3 +- .../internal/support/AbstractClient.java | 3 +- .../indices/shrink/ResizeResponseTests.java | 63 ------------------- ...ActionIndicesThatCannotBeCreatedTests.java | 3 +- .../xpack/core/ilm/ShrinkStepTests.java | 10 +-- 12 files changed, 29 insertions(+), 130 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java index 3429457dd7e0f..f0596d061aeb3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexResponse.java @@ -46,7 +46,7 @@ protected static void declareFields(Constructing private final String index; - protected CreateIndexResponse(StreamInput in) throws IOException { + public CreateIndexResponse(StreamInput in) throws IOException { super(in, true); index = in.readString(); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java index dc26e0380fe72..aa838e473bd29 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java @@ -9,14 +9,15 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public class ResizeAction extends ActionType { +public class ResizeAction extends ActionType { public static final ResizeAction INSTANCE = new ResizeAction(); public static final String NAME = "indices:admin/resize"; private ResizeAction() { - super(NAME, ResizeResponse::new); + super(NAME, CreateIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 71270cd61b9ed..c39d2e1114618 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -152,7 +153,7 @@ public String getSourceIndex() { * non-negative integer, up to the number of copies per shard (number of 
replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index a4972d1a98e7d..a18de15037e49 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -8,13 +8,14 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { +public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { public ResizeRequestBuilder(ElasticsearchClient client) { super(client, ResizeAction.INSTANCE, new ResizeRequest()); } @@ -43,7 +44,7 @@ public ResizeRequestBuilder setSettings(Settings settings) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ResizeResponse#isShardsAcknowledged()} to + * to be active before returning. Check {@link CreateIndexResponse#isShardsAcknowledged()} to * determine if the requisite shard copies were all started before returning or timing out. * * @param waitForActiveShards number of active shard copies to wait on diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java deleted file mode 100644 index 768fc18397519..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.shrink; - -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; - -/** - * A response for a resize index action, either shrink or split index. - */ -public final class ResizeResponse extends CreateIndexResponse { - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "resize_index", - true, - args -> new ResizeResponse((boolean) args[0], (boolean) args[1], (String) args[2]) - ); - - static { - declareFields(PARSER); - } - - ResizeResponse(StreamInput in) throws IOException { - super(in); - } - - public ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { - super(acknowledged, shardsAcknowledged, index); - } - - public static ResizeResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java index 8ce69309cf59d..7df58990b69ed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java @@ -9,14 +9,15 @@ package org.elasticsearch.action.admin.indices.shrink; import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public class ShrinkAction extends ActionType { +public class ShrinkAction extends ActionType { public static final ShrinkAction INSTANCE = new ShrinkAction(); public static final String NAME = "indices:admin/shrink"; private ShrinkAction() { - super(NAME, ResizeResponse::new); + super(NAME, CreateIndexResponse::new); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java index 5686deb6b804a..fbae64dcb6d45 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; @@ -41,7 +42,7 @@ /** * Main class to initiate resizing (shrink / split) an index into a new index */ -public class TransportResizeAction extends TransportMasterNodeAction { +public class TransportResizeAction extends TransportMasterNodeAction { private final MetadataCreateIndexService createIndexService; private final Client client; @@ -86,7 +87,7 @@ protected TransportResizeAction( actionFilters, ResizeRequest::new, indexNameExpressionResolver, - ResizeResponse::new, + CreateIndexResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.createIndexService = createIndexService; @@ 
-103,7 +104,7 @@ protected void masterOperation( Task task, final ResizeRequest resizeRequest, final ClusterState state, - final ActionListener listener + final ActionListener listener ) { // there is no need to fetch docs stats for split but we keep it simple and do it anyway for simplicity of the code @@ -136,7 +137,11 @@ protected void masterOperation( createIndexService.createIndex( updateRequest, delegatedListener.map( - response -> new ResizeResponse(response.isAcknowledged(), response.isShardsAcknowledged(), updateRequest.index()) + response -> new CreateIndexResponse( + response.isAcknowledged(), + response.isShardsAcknowledged(), + updateRequest.index() + ) ) ); }) diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java index af3325dc6bd8f..d931302740f19 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java @@ -67,7 +67,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -588,7 +587,7 @@ public interface IndicesAdminClient extends ElasticsearchClient { /** * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index of the index. 
*/ - void resizeIndex(ResizeRequest request, ActionListener listener); + void resizeIndex(ResizeRequest request, ActionListener listener); /** * Swaps the index pointed to by an alias given all provided conditions are satisfied diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index c873ff884b642..c6d9c3a8f3563 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -194,7 +194,6 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; @@ -1449,7 +1448,7 @@ public ResizeRequestBuilder prepareResizeIndex(String sourceIndex, String target } @Override - public void resizeIndex(ResizeRequest request, ActionListener listener) { + public void resizeIndex(ResizeRequest request, ActionListener listener) { execute(ResizeAction.INSTANCE, request, listener); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java deleted file mode 100644 index b9f3e8b89a214..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponseTests.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.indices.shrink; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.test.AbstractXContentSerializingTestCase; -import org.elasticsearch.xcontent.XContentParser; - -public class ResizeResponseTests extends AbstractXContentSerializingTestCase { - - public void testToXContent() { - ResizeResponse response = new ResizeResponse(true, false, "index_name"); - String output = Strings.toString(response); - assertEquals(""" - {"acknowledged":true,"shards_acknowledged":false,"index":"index_name"}""", output); - } - - @Override - protected ResizeResponse doParseInstance(XContentParser parser) { - return ResizeResponse.fromXContent(parser); - } - - @Override - protected ResizeResponse createTestInstance() { - boolean acknowledged = randomBoolean(); - boolean shardsAcknowledged = acknowledged && randomBoolean(); - String index = randomAlphaOfLength(5); - return new ResizeResponse(acknowledged, shardsAcknowledged, index); - } - - @Override - protected Writeable.Reader instanceReader() { - return ResizeResponse::new; - } - - @Override - protected ResizeResponse mutateInstance(ResizeResponse response) { - if (randomBoolean()) { - if (randomBoolean()) { - boolean acknowledged = response.isAcknowledged() == false; - boolean shardsAcknowledged = acknowledged && response.isShardsAcknowledged(); - return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); - } else { - boolean shardsAcknowledged = response.isShardsAcknowledged() == false; - boolean acknowledged = shardsAcknowledged || response.isAcknowledged(); - return new ResizeResponse(acknowledged, shardsAcknowledged, response.index()); - } - } else { - return new ResizeResponse( - response.isAcknowledged(), - response.isShardsAcknowledged(), - response.index() + randomAlphaOfLengthBetween(2, 5) - ); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java index 1276f6c2db58b..75833052dd4c8 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIndicesThatCannotBeCreatedTests.java @@ -143,8 +143,7 @@ void createIndex(String index, TimeValue timeout, ActionListener { ResizeRequest request = (ResizeRequest) invocation.getArguments()[0]; @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) invocation.getArguments()[1]; + ActionListener listener = (ActionListener) invocation.getArguments()[1]; assertThat(request.getSourceIndex(), equalTo(sourceIndexMetadata.getIndex().getName())); assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.emptySet())); @@ -119,7 +119,7 @@ public void testPerformAction() throws Exception { ); } request.setMaxPrimaryShardSize(step.getMaxPrimaryShardSize()); - listener.onResponse(new ResizeResponse(true, true, sourceIndexMetadata.getIndex().getName())); + listener.onResponse(new CreateIndexResponse(true, true, sourceIndexMetadata.getIndex().getName())); return null; }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); @@ -181,8 +181,8 @@ public void testPerformActionIsCompleteForUnAckedRequests() throws Exception { Mockito.doAnswer(invocation -> { @SuppressWarnings("unchecked") - ActionListener listener = (ActionListener) 
invocation.getArguments()[1]; - listener.onResponse(new ResizeResponse(false, false, indexMetadata.getIndex().getName())); + ActionListener listener = (ActionListener) invocation.getArguments()[1]; + listener.onResponse(new CreateIndexResponse(false, false, indexMetadata.getIndex().getName())); return null; }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any()); From 4346a409a9ea1c0e93b389f020a65f00eb28570f Mon Sep 17 00:00:00 2001 From: wdongyu <23725216+wdongyu@users.noreply.github.com> Date: Wed, 17 Jan 2024 23:33:14 +0800 Subject: [PATCH 83/95] Fix upsert too long id (#103399) add validation on _id field when upsert --- docs/changelog/103399.yaml | 6 +++++ .../elasticsearch/action/DocWriteRequest.java | 17 ++++++++++++++ .../action/index/IndexRequest.java | 13 +---------- .../action/update/UpdateRequest.java | 2 ++ .../action/update/UpdateRequestTests.java | 22 +++++++++++++++++++ 5 files changed, 48 insertions(+), 12 deletions(-) create mode 100644 docs/changelog/103399.yaml diff --git a/docs/changelog/103399.yaml b/docs/changelog/103399.yaml new file mode 100644 index 0000000000000..440ac90b313f5 --- /dev/null +++ b/docs/changelog/103399.yaml @@ -0,0 +1,6 @@ +pr: 103399 +summary: "add validation on _id field when upsert new doc" +area: Search +type: bug +issues: + - 102981 diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index dab46aed5b4bc..2a9449b35c7b5 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -24,9 +24,11 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Locale; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.action.index.IndexRequest.MAX_DOCUMENT_ID_LENGTH_IN_BYTES; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; @@ -314,4 +316,19 @@ static ActionRequestValidationException validateSeqNoBasedCASParams( return validationException; } + + static ActionRequestValidationException validateDocIdLength(String id, ActionRequestValidationException validationException) { + if (id != null && id.getBytes(StandardCharsets.UTF_8).length > MAX_DOCUMENT_ID_LENGTH_IN_BYTES) { + validationException = addValidationError( + "id [" + + id + + "] is too long, must be no longer than " + + MAX_DOCUMENT_ID_LENGTH_IN_BYTES + + " bytes but was: " + + id.getBytes(StandardCharsets.UTF_8).length, + validationException + ); + } + return validationException; + } } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index b5d5b651fb340..285346adcd13f 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -43,7 +43,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -266,17 +265,7 @@ public ActionRequestValidationException validate() { validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); - if (id != null && 
id.getBytes(StandardCharsets.UTF_8).length > MAX_DOCUMENT_ID_LENGTH_IN_BYTES) { - validationException = addValidationError( - "id [" - + id - + "] is too long, must be no longer than " - + MAX_DOCUMENT_ID_LENGTH_IN_BYTES - + " bytes but was: " - + id.getBytes(StandardCharsets.UTF_8).length, - validationException - ); - } + validationException = DocWriteRequest.validateDocIdLength(id, validationException); if (pipeline != null && pipeline.isEmpty()) { validationException = addValidationError("pipeline cannot be an empty string", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index 600790b2fd841..800eca618c5bc 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -183,6 +183,8 @@ public ActionRequestValidationException validate() { validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); + validationException = DocWriteRequest.validateDocIdLength(id, validationException); + if (ifSeqNo != UNASSIGNED_SEQ_NO) { if (retryOnConflict > 0) { validationException = addValidationError("compare and write operations can not be retried", validationException); diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index 735ae41558240..7ee4d2d6bba9b 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -55,6 +55,7 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; @@ -516,6 +517,27 @@ public void testToValidateUpsertRequestWithVersion() { assertThat(updateRequest.validate().validationErrors(), contains("can't provide version in upsert request")); } + public void testUpdatingRejectsLongIds() { + String id = randomAlphaOfLength(511); + UpdateRequest request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + ActionRequestValidationException validate = request.validate(); + assertNull(validate); + + id = randomAlphaOfLength(512); + request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + validate = request.validate(); + assertNull(validate); + + id = randomAlphaOfLength(513); + request = new UpdateRequest("index", id); + request.doc("{}", XContentType.JSON); + validate = request.validate(); + assertThat(validate, notNullValue()); + assertThat(validate.getMessage(), containsString("id [" + id + "] is too long, must be no longer than 512 bytes but was: 513")); + } + public void testValidate() { { UpdateRequest request = new UpdateRequest("index", "id"); From aa42368dbac0722c48931620255ffbf8221eb53f Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Wed, 17 Jan 2024 17:01:28 +0100 Subject: [PATCH 84/95] Revert "Adding threadpool metrics (#102371)" (#104467) This reverts commit afd915af1e767495baede210c17b5769ea5b11b4. 
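
For reference, a minimal sketch of the call-site shape this restores
(test-style setup; the surrounding code is assumed, not part of this
change):

    // Before the revert, construction took a MeterRegistry:
    //   ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP);
    // After the revert, call sites drop that argument again:
    Settings settings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build();
    ThreadPool threadPool = new ThreadPool(settings);
    try {
        // ... submit work via threadPool.executor(...) as usual ...
    } finally {
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }

The es.thread_pool.* gauges and async counters, and the rejection
counter registration on EsRejectedExecutionHandler, go away with it.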
--- ...sAvailabilityHealthIndicatorBenchmark.java | 3 +- docs/changelog/102371.yaml | 5 - .../apm/internal/MetricNameValidator.java | 25 ----- .../internal/MetricNameValidatorTests.java | 8 -- .../ingest/geoip/GeoIpDownloaderTests.java | 3 +- .../Netty4SizeHeaderFrameDecoderTests.java | 3 +- .../threadpool/SimpleThreadPoolIT.java | 78 --------------- .../EsRejectedExecutionHandler.java | 11 --- .../elasticsearch/node/NodeConstruction.java | 21 ++-- .../elasticsearch/threadpool/ThreadPool.java | 97 +------------------ .../TransportMultiSearchActionTests.java | 5 +- .../search/TransportSearchActionTests.java | 3 +- .../TransportActionFilterChainTests.java | 6 +- .../AbstractClientHeadersTestCase.java | 3 +- .../http/HttpClientStatsTrackerTests.java | 3 +- .../threadpool/FixedThreadPoolTests.java | 3 +- .../threadpool/ScalingThreadPoolTests.java | 3 +- .../ScheduleWithFixedDelayTests.java | 11 +-- .../ThreadPoolSerializationTests.java | 3 +- .../UpdateThreadPoolSettingsTests.java | 20 ++-- .../ClusterConnectionManagerTests.java | 3 +- .../telemetry/MetricRecorder.java | 7 -- .../telemetry/TestTelemetryPlugin.java | 13 --- .../threadpool/TestThreadPool.java | 3 +- .../authc/AuthenticationServiceTests.java | 1 - .../security/authc/TokenServiceTests.java | 2 - ...InternalEnrollmentTokenGeneratorTests.java | 2 - .../apikey/RestCreateApiKeyActionTests.java | 3 +- .../apikey/RestGetApiKeyActionTests.java | 3 +- .../RestInvalidateApiKeyActionTests.java | 3 +- .../apikey/RestQueryApiKeyActionTests.java | 3 +- .../SecurityNetty4HeaderSizeLimitTests.java | 3 +- 32 files changed, 36 insertions(+), 324 deletions(-) delete mode 100644 docs/changelog/102371.yaml diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index 8c5de05a01648..ef834fad424e3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -167,7 +166,7 @@ public void setUp() throws Exception { .build(); Settings settings = Settings.builder().put("node.name", ShardsAvailabilityHealthIndicatorBenchmark.class.getSimpleName()).build(); - ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + ThreadPool threadPool = new ThreadPool(settings); ClusterService clusterService = new ClusterService( Settings.EMPTY, diff --git a/docs/changelog/102371.yaml b/docs/changelog/102371.yaml deleted file mode 100644 index 5a698bc9d671a..0000000000000 --- a/docs/changelog/102371.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102371 -summary: Adding threadpool metrics -area: Infra/Core -type: enhancement -issues: [] diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java index 9ab7412426db8..1a698b778687c 100644 --- 
a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidator.java @@ -32,13 +32,6 @@ public class MetricNameValidator { static final int MAX_ELEMENT_LENGTH = 30; static final int MAX_NUMBER_OF_ELEMENTS = 10; - static final Set SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC = Set.of( - "searchable_snapshots_cache_fetch_async", - "searchable_snapshots_cache_prewarming", - "security-token-key", - "security-crypto" - ); - private MetricNameValidator() {} /** @@ -49,10 +42,6 @@ private MetricNameValidator() {} */ public static String validate(String metricName) { Objects.requireNonNull(metricName); - - if (skipValidationToBWC(metricName)) { - return metricName; - } validateMaxMetricNameLength(metricName); String[] elements = metricName.split("\\."); @@ -64,19 +53,6 @@ public static String validate(String metricName) { return metricName; } - /** - * Due to backwards compatibility some metric names would have to skip validation. - * This is for instance where a threadpool name is too long, or contains `-` - * We want to allow to easily find threadpools in code base that are alerting with a metric - * as well as find thread pools metrics in dashboards with their codebase names. - * Renaming a threadpool name would be a breaking change. - * - * NOTE: only allow skipping validation if a refactor in codebase would cause a breaking change - */ - private static boolean skipValidationToBWC(String metricName) { - return SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC.stream().anyMatch(m -> metricName.contains(m)); - } - private static void validateMaxMetricNameLength(String metricName) { if (metricName.length() > MAX_METRIC_NAME_LENGTH) { throw new IllegalArgumentException( @@ -132,7 +108,6 @@ private static void hasESPrefix(String[] elements, String name) { private static void perElementValidations(String[] elements, String name) { for (String element : elements) { - hasOnlyAllowedCharacters(element, name); hasNotBreachLengthLimit(element, name); } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java index 9a5479cc65a93..64f78d0af494c 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/MetricNameValidatorTests.java @@ -78,13 +78,6 @@ public void testLastElementAllowList() { expectThrows(IllegalArgumentException.class, () -> MetricNameValidator.validate("es.somemodule.somemetric.some_other_suffix")); } - public void testSkipValidationDueToBWC() { - for (String partOfMetricName : MetricNameValidator.SKIP_VALIDATION_METRIC_NAMES_DUE_TO_BWC) { - MetricNameValidator.validate("es.threadpool." 
+ partOfMetricName + ".total");// fake metric name, but with the part that skips - // validation - } - } - public static String metricNameWithLength(int length) { int prefixAndSuffix = "es.".length() + ".utilization".length(); assert length > prefixAndSuffix : "length too short"; @@ -106,5 +99,4 @@ public static String metricNameWithLength(int length) { metricName.append("utilization"); return metricName.toString(); } - } diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 915d54c91b259..baf3006378054 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -34,7 +34,6 @@ import org.elasticsearch.persistent.PersistentTaskState; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.ThreadPool; @@ -79,7 +78,7 @@ public class GeoIpDownloaderTests extends ESTestCase { public void setup() { httpClient = mock(HttpClient.class); clusterService = mock(ClusterService.class); - threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings( Settings.EMPTY, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java index 3e74a74dbd49c..224436a388ce5 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportSettings; @@ -52,7 +51,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); nettyTransport = new Netty4Transport( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java index e8950fbb2f9c6..841f77ea7efab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/threadpool/SimpleThreadPoolIT.java @@ -11,11 +11,6 @@ 
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.telemetry.InstrumentType; -import org.elasticsearch.telemetry.Measurement; -import org.elasticsearch.telemetry.TestTelemetryPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -24,18 +19,12 @@ import java.lang.management.ManagementFactory; import java.lang.management.ThreadInfo; import java.lang.management.ThreadMXBean; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashSet; -import java.util.List; -import java.util.Map; import java.util.Set; import java.util.regex.Pattern; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.in; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class SimpleThreadPoolIT extends ESIntegTestCase { @@ -44,11 +33,6 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return Settings.builder().build(); } - @Override - protected Collection> nodePlugins() { - return List.of(TestTelemetryPlugin.class); - } - public void testThreadNames() throws Exception { ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); Set preNodeStartThreadNames = new HashSet<>(); @@ -111,66 +95,4 @@ public void testThreadNames() throws Exception { } } - public void testThreadPoolMetrics() throws Exception { - internalCluster().startNode(); - - final String dataNodeName = internalCluster().getRandomNodeName(); - final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNodeName) - .filterPlugins(TestTelemetryPlugin.class) - .findFirst() - .orElseThrow(); - - logger.info("do some indexing, flushing, optimize, and searches"); - int numDocs = randomIntBetween(2, 100); - IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; - for (int i = 0; i < numDocs; ++i) { - builders[i] = prepareIndex("idx").setSource( - jsonBuilder().startObject() - .field("str_value", "s" + i) - .array("str_values", new String[] { "s" + (i * 2), "s" + (i * 2 + 1) }) - .field("l_value", i) - .array("l_values", new int[] { i * 2, i * 2 + 1 }) - .field("d_value", i) - .array("d_values", new double[] { i * 2, i * 2 + 1 }) - .endObject() - ); - } - indexRandom(true, builders); - int numSearches = randomIntBetween(2, 100); - for (int i = 0; i < numSearches; i++) { - assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("str_value", "s" + i))); - assertNoFailures(prepareSearch("idx").setQuery(QueryBuilders.termQuery("l_value", i))); - } - plugin.collect(); - final var tp = internalCluster().getInstance(ThreadPool.class, dataNodeName); - ThreadPoolStats tps = tp.stats(); - ArrayList registeredMetrics = plugin.getRegisteredMetrics(InstrumentType.LONG_GAUGE); - registeredMetrics.addAll(plugin.getRegisteredMetrics(InstrumentType.LONG_ASYNC_COUNTER)); - tps.forEach(stats -> { - Map threadPoolMetrics = Map.of( - ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED, - stats.completed(), - ThreadPool.THREAD_POOL_METRIC_NAME_ACTIVE, - (long) stats.active(), - ThreadPool.THREAD_POOL_METRIC_NAME_CURRENT, - (long) 
stats.threads(), - ThreadPool.THREAD_POOL_METRIC_NAME_LARGEST, - (long) stats.largest(), - ThreadPool.THREAD_POOL_METRIC_NAME_QUEUE, - (long) stats.queue() - ); - threadPoolMetrics.forEach((suffix, value) -> { - String metricName = ThreadPool.THREAD_POOL_METRIC_PREFIX + stats.name() + suffix; - List measurements; - if (suffix.equals(ThreadPool.THREAD_POOL_METRIC_NAME_COMPLETED)) { - measurements = plugin.getLongAsyncCounterMeasurement(metricName); - } else { - measurements = plugin.getLongGaugeMeasurement(metricName); - } - assertThat(metricName, in(registeredMetrics)); - assertThat(measurements.get(0).value(), equalTo(value)); - }); - }); - } - } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java index 9457773eb8071..3878a4a2dff9d 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsRejectedExecutionHandler.java @@ -9,8 +9,6 @@ package org.elasticsearch.common.util.concurrent; import org.elasticsearch.common.metrics.CounterMetric; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.RejectedExecutionHandler; import java.util.concurrent.ThreadPoolExecutor; @@ -18,7 +16,6 @@ public abstract class EsRejectedExecutionHandler implements RejectedExecutionHandler { private final CounterMetric rejected = new CounterMetric(); - private LongCounter rejectionCounter = null; /** * The number of rejected executions. @@ -29,14 +26,6 @@ public long rejected() { protected void incrementRejections() { rejected.inc(); - if (rejectionCounter != null) { - rejectionCounter.increment(); - } - } - - public void registerCounter(MeterRegistry meterRegistry, String prefix, String name) { - rejectionCounter = meterRegistry.registerLongCounter(prefix + ".rejected.total", "number of rejected threads for " + name, "count"); - rejectionCounter.incrementBy(rejected()); } protected static EsRejectedExecutionException newRejectedException( diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 3cad0232cb2cf..aa62ea689a5a9 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -183,7 +183,6 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.telemetry.TelemetryProvider; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -242,8 +241,8 @@ static NodeConstruction prepareConstruction( NodeConstruction constructor = new NodeConstruction(closeables); Settings settings = constructor.createEnvironment(initialEnvironment, serviceProvider); - TelemetryProvider telemetryProvider = constructor.createTelemetryProvider(settings); - ThreadPool threadPool = constructor.createThreadPool(settings, telemetryProvider.getMeterRegistry()); + + ThreadPool threadPool = constructor.createThreadPool(settings); SettingsModule settingsModule = constructor.validateSettings(initialEnvironment.settings(), settings, threadPool); SearchModule searchModule = 
constructor.createSearchModule(settingsModule.getSettings(), threadPool); @@ -258,8 +257,7 @@ static NodeConstruction prepareConstruction( scriptService, constructor.createAnalysisRegistry(), serviceProvider, - forbidPrivateIndexSettings, - telemetryProvider + forbidPrivateIndexSettings ); return constructor; @@ -450,14 +448,9 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr return settings; } - private TelemetryProvider createTelemetryProvider(Settings settings) { - return getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)).orElse(TelemetryProvider.NOOP); - } - - private ThreadPool createThreadPool(Settings settings, MeterRegistry meterRegistry) throws IOException { + private ThreadPool createThreadPool(Settings settings) throws IOException { ThreadPool threadPool = new ThreadPool( settings, - meterRegistry, pluginsService.flatMap(p -> p.getExecutorBuilders(settings)).toArray(ExecutorBuilder[]::new) ); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); @@ -587,12 +580,13 @@ private void construct( ScriptService scriptService, AnalysisRegistry analysisRegistry, NodeServiceProvider serviceProvider, - boolean forbidPrivateIndexSettings, - TelemetryProvider telemetryProvider + boolean forbidPrivateIndexSettings ) throws IOException { Settings settings = settingsModule.getSettings(); + TelemetryProvider telemetryProvider = getSinglePlugin(TelemetryPlugin.class).map(p -> p.getTelemetryProvider(settings)) + .orElse(TelemetryProvider.NOOP); modules.bindToInstance(Tracer.class, telemetryProvider.getTracer()); TaskManager taskManager = new TaskManager( @@ -604,7 +598,6 @@ private void construct( ).collect(Collectors.toSet()), telemetryProvider.getTracer() ); - final Tracer tracer = telemetryProvider.getTracer(); ClusterService clusterService = createClusterService(settingsModule, threadPool, taskManager); clusterService.addStateApplier(scriptService); diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index fef0d93ec86cc..17cafaee19bb4 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -29,11 +29,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.node.ReportingService; -import org.elasticsearch.telemetry.metric.Instrument; -import org.elasticsearch.telemetry.metric.LongAsyncCounter; -import org.elasticsearch.telemetry.metric.LongGauge; -import org.elasticsearch.telemetry.metric.LongWithAttributes; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -90,13 +85,6 @@ public static class Names { public static final String SYSTEM_CRITICAL_WRITE = "system_critical_write"; } - public static final String THREAD_POOL_METRIC_PREFIX = "es.thread_pool."; - public static final String THREAD_POOL_METRIC_NAME_COMPLETED = ".threads.completed.total"; - public static final String THREAD_POOL_METRIC_NAME_CURRENT = ".threads.count.current"; - public static final String THREAD_POOL_METRIC_NAME_QUEUE = ".threads.queue.size"; - public static final String THREAD_POOL_METRIC_NAME_ACTIVE = ".threads.active.current"; - public static final String THREAD_POOL_METRIC_NAME_LARGEST = ".threads.largest.current"; - public enum ThreadPoolType { DIRECT("direct"), 
FIXED("fixed"), @@ -165,8 +153,6 @@ public static ThreadPoolType fromType(String type) { private final long slowSchedulerWarnThresholdNanos; - private Map> instruments; - @SuppressWarnings("rawtypes") public Collection builders() { return Collections.unmodifiableCollection(builders.values()); @@ -194,7 +180,7 @@ public Collection builders() { ); @SuppressWarnings({ "rawtypes", "unchecked" }) - public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final ExecutorBuilder... customBuilders) { + public ThreadPool(final Settings settings, final ExecutorBuilder... customBuilders) { assert Node.NODE_NAME_SETTING.exists(settings); final Map builders = new HashMap<>(); @@ -203,7 +189,6 @@ public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final Ex final int halfProcMaxAt5 = halfAllocatedProcessorsMaxFive(allocatedProcessors); final int halfProcMaxAt10 = halfAllocatedProcessorsMaxTen(allocatedProcessors); final int genericThreadPoolMax = boundedBy(4 * allocatedProcessors, 128, 512); - final Map> instruments = new HashMap<>(); builders.put( Names.GENERIC, @@ -322,8 +307,7 @@ public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final Ex executors.put(Names.SAME, new ExecutorHolder(EsExecutors.DIRECT_EXECUTOR_SERVICE, new Info(Names.SAME, ThreadPoolType.DIRECT))); this.executors = Map.copyOf(executors); - this.executors.forEach((k, v) -> instruments.put(k, setupMetrics(meterRegistry, k, v))); - this.instruments = instruments; + final List infos = executors.values() .stream() .filter(holder -> holder.info.getName().equals("same") == false) @@ -340,59 +324,6 @@ public ThreadPool(final Settings settings, MeterRegistry meterRegistry, final Ex this.cachedTimeThread.start(); } - private static ArrayList setupMetrics(MeterRegistry meterRegistry, String name, ExecutorHolder holder) { - Map at = Map.of(); - ArrayList instruments = new ArrayList<>(); - if (holder.executor() instanceof ThreadPoolExecutor threadPoolExecutor) { - String prefix = THREAD_POOL_METRIC_PREFIX + name; - instruments.add( - meterRegistry.registerLongGauge( - prefix + THREAD_POOL_METRIC_NAME_CURRENT, - "number of threads for " + name, - "count", - () -> new LongWithAttributes(threadPoolExecutor.getPoolSize(), at) - ) - ); - instruments.add( - meterRegistry.registerLongGauge( - prefix + THREAD_POOL_METRIC_NAME_QUEUE, - "number queue size for " + name, - "count", - () -> new LongWithAttributes(threadPoolExecutor.getQueue().size(), at) - ) - ); - instruments.add( - meterRegistry.registerLongGauge( - prefix + THREAD_POOL_METRIC_NAME_ACTIVE, - "number of active threads for " + name, - "count", - () -> new LongWithAttributes(threadPoolExecutor.getActiveCount(), at) - ) - ); - instruments.add( - meterRegistry.registerLongGauge( - prefix + THREAD_POOL_METRIC_NAME_LARGEST, - "largest pool size for " + name, - "count", - () -> new LongWithAttributes(threadPoolExecutor.getLargestPoolSize(), at) - ) - ); - instruments.add( - meterRegistry.registerLongAsyncCounter( - prefix + THREAD_POOL_METRIC_NAME_COMPLETED, - "number of completed threads for " + name, - "count", - () -> new LongWithAttributes(threadPoolExecutor.getCompletedTaskCount(), at) - ) - ); - RejectedExecutionHandler rejectedExecutionHandler = threadPoolExecutor.getRejectedExecutionHandler(); - if (rejectedExecutionHandler instanceof EsRejectedExecutionHandler handler) { - handler.registerCounter(meterRegistry, prefix, name); - } - } - return instruments; - } - // for subclassing by tests that don't actually use any of the machinery 
that the regular constructor sets up protected ThreadPool() { this.builders = Map.of(); @@ -610,33 +541,11 @@ protected final void stopCachedTimeThread() { cachedTimeThread.interrupt(); } - private void closeMetrics(ExecutorHolder executor) { - if (this.instruments.containsKey(executor.info.getName())) { - this.instruments.get(executor.info.getName()).forEach((instrument) -> { - if (instrument instanceof LongAsyncCounter longasynccounter) { - try { - longasynccounter.close(); - } catch (Exception e) { - logger.warn(format("Failed to close LongAsyncCounter for %s. %s", executor.info.getName(), e.getMessage()), e); - } - } else if (instrument instanceof LongGauge longgauge) { - try { - longgauge.close(); - } catch (Exception e) { - logger.warn(format("Failed to close LongGauge for %s. %s", executor.info.getName(), e.getMessage()), e); - } - } - }); - } - this.instruments.remove(executor.info.getName()); - } - public void shutdown() { stopCachedTimeThread(); scheduler.shutdown(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { - closeMetrics(executor); executor.executor().shutdown(); } } @@ -647,7 +556,6 @@ public void shutdownNow() { scheduler.shutdownNow(); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { - closeMetrics(executor); executor.executor().shutdownNow(); } } @@ -657,7 +565,6 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE boolean result = scheduler.awaitTermination(timeout, unit); for (ExecutorHolder executor : executors.values()) { if (executor.executor() instanceof ThreadPoolExecutor) { - closeMetrics(executor); result &= executor.executor().awaitTermination(timeout, unit); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index d04e41c83699d..fb27d824417b1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -53,7 +52,7 @@ public void testParentTaskId() throws Exception { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + ThreadPool threadPool = new ThreadPool(settings); try { TransportService transportService = new TransportService( Settings.EMPTY, @@ -121,7 +120,7 @@ public void testBatchExecute() throws ExecutionException, InterruptedException { Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); - ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + ThreadPool threadPool = new ThreadPool(settings); TransportService 
transportService = new TransportService( Settings.EMPTY, mock(Transport.class), diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index e0eed9daa97f6..2271821fc07da 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -76,7 +76,6 @@ import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.telemetry.TelemetryProvider; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -1697,7 +1696,7 @@ public void testCCSCompatibilityCheck() throws Exception { ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); - ThreadPool threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + ThreadPool threadPool = new ThreadPool(settings); try { TransportService transportService = MockTransportService.createNewService( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index 82c204b1d0b88..64ab7a9819190 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -51,10 +50,7 @@ public class TransportActionFilterChainTests extends ESTestCase { @Before public void init() throws Exception { counter = new AtomicInteger(); - threadPool = new ThreadPool( - Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build(), - MeterRegistry.NOOP - ); + threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "TransportActionFilterChainTests").build()); } @After diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 97c52ef2edc37..5175fee7edceb 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -77,7 +76,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); client = buildClient(settings, ACTIONS); } diff --git a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java index 2dfaaf34bb1f1..99e99540489c5 100644 --- a/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/http/HttpClientStatsTrackerTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -438,7 +437,7 @@ private static class FakeTimeThreadPool extends ThreadPool { private final long absoluteTimeOffset = randomLong(); FakeTimeThreadPool() { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), MeterRegistry.NOOP); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build()); stopCachedTimeThread(); setRandomTime(); } diff --git a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java index 6be78f27135a5..5c355c8009d54 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/FixedThreadPoolTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; @@ -34,7 +33,7 @@ public void testRejectedExecutionCounter() throws InterruptedException { .put("thread_pool." 
+ threadPoolName + ".queue_size", queueSize) .build(); try { - threadPool = new ThreadPool(nodeSettings, MeterRegistry.NOOP); + threadPool = new ThreadPool(nodeSettings); // these tasks will consume the thread pool causing further // submissions to queue diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index 9a0c5c4b75d54..8d7a486ee79f0 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.CheckedRunnable; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.hamcrest.Matcher; import java.util.HashMap; @@ -425,7 +424,7 @@ public void runScalingThreadPoolTest(final Settings settings, final BiConsumer getMeasurements(InstrumentType instrumentType, String n return metrics.get(instrumentType).called.getOrDefault(Objects.requireNonNull(name), Collections.emptyList()); } - public ArrayList getRegisteredMetrics(InstrumentType instrumentType) { - ArrayList registeredMetrics = new ArrayList<>(); - metrics.get(instrumentType).instruments.forEach((name, registration) -> { registeredMetrics.add(name); }); - return registeredMetrics; - } - /** * Get the {@link Registration} for a given elasticsearch {@link Instrument}. */ diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java index a4c73634dc102..e237f6c9bbb4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/TestTelemetryPlugin.java @@ -15,7 +15,6 @@ import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; -import java.util.ArrayList; import java.util.List; /** @@ -42,10 +41,6 @@ public List getLongCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_COUNTER, name); } - public List getLongAsyncCounterMeasurement(String name) { - return meter.getRecorder().getMeasurements(InstrumentType.LONG_ASYNC_COUNTER, name); - } - public List getDoubleUpDownCounterMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.DOUBLE_UP_DOWN_COUNTER, name); } @@ -70,18 +65,10 @@ public List getLongHistogramMeasurement(String name) { return meter.getRecorder().getMeasurements(InstrumentType.LONG_HISTOGRAM, name); } - public void collect() { - meter.getRecorder().collect(); - } - public void resetMeter() { meter.getRecorder().resetCalls(); } - public ArrayList getRegisteredMetrics(InstrumentType instrumentType) { - return meter.getRecorder().getRegisteredMetrics(instrumentType); - } - @Override public TelemetryProvider getTelemetryProvider(Settings settings) { return new TelemetryProvider() { diff --git a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java index ce8e3a2574f3e..e8a853989e8e5 100644 --- a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java @@ -12,7 +12,6 @@ import 
org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Releasable; import org.elasticsearch.node.Node; -import org.elasticsearch.telemetry.metric.MeterRegistry; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; @@ -31,7 +30,7 @@ public TestThreadPool(String name, ExecutorBuilder... customBuilders) { } public TestThreadPool(String name, Settings settings, ExecutorBuilder... customBuilders) { - super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), MeterRegistry.NOOP, customBuilders); + super(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), name).put(settings).build(), customBuilders); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 3c6f7462c0bb4..e9a252553fe8d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -265,7 +265,6 @@ public void init() throws Exception { client = mock(Client.class); threadPool = new ThreadPool( settings, - MeterRegistry.NOOP, new FixedExecutorBuilder( settings, THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index adf0b44266260..3c542a33d25e9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -62,7 +62,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -270,7 +269,6 @@ public void tearDown() throws Exception { public static void startThreadPool() throws IOException { threadPool = new ThreadPool( settings, - MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java index 3a4e5a404eace..2abbb6a610170 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGeneratorTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.http.HttpInfo; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.node.Node; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -85,7 +84,6 @@ public static void startThreadPool() throws IOException { final Settings settings = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), 
"InternalEnrollmentTokenGeneratorTests").build(); threadPool = new ThreadPool( settings, - MeterRegistry.NOOP, new FixedExecutorBuilder( settings, TokenService.THREAD_POOL_NAME, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java index 0ab9533e62d4c..791aba46c92ea 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestCreateApiKeyActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.RestToXContentListener; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -55,7 +54,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index 2ee42b360f02a..a1f696cc5dddd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -61,7 +60,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index 8bbd051c2fc32..3c0e24da32763 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -54,7 +53,7 @@ public void setUp() throws 
Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index 4f14d8414ebca..67d2ab006eb22 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.search.searchafter.SearchAfterBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; @@ -59,7 +58,7 @@ public void setUp() throws Exception { .put("node.name", "test-" + getTestName()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); } @Override diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java index 8c422342c3640..c87ddd116b138 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -78,7 +77,7 @@ public final class SecurityNetty4HeaderSizeLimitTests extends ESTestCase { @Before public void startThreadPool() { - threadPool = new ThreadPool(settings, MeterRegistry.NOOP); + threadPool = new ThreadPool(settings); TaskManager taskManager = new TaskManager(settings, threadPool, Collections.emptySet()); NetworkService networkService = new NetworkService(Collections.emptyList()); PageCacheRecycler recycler = new MockPageCacheRecycler(Settings.EMPTY); From b4d1e952575851910a6c00ee2c2067534b91256d Mon Sep 17 00:00:00 2001 From: Matteo Piergiovanni <134913285+piergm@users.noreply.github.com> Date: Wed, 17 Jan 2024 17:27:42 +0100 Subject: [PATCH 85/95] Dynamically adjust node metrics cache expiry (#104460) Node metrics cache is now dynamically set to expire after half the APM reporting interval.
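For illustration, here is a minimal sketch of the expiry computation this patch introduces. The class name `NodeStatsCacheExpirySketch` is hypothetical; the setting key, the 10s default, and the `TimeValue` arithmetic are taken directly from the diff below.

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;

// Illustrative only, not part of this patch.
class NodeStatsCacheExpirySketch {
    static TimeValue cacheExpiry(Settings settings) {
        // Same setting lookup and default as the NodeConstruction change below.
        TimeValue metricsInterval = settings.getAsTime("tracing.apm.agent.metrics_interval", TimeValue.timeValueSeconds(10));
        // Half the reporting interval: the lazily refreshed cache updates at most
        // once per poll period and is stale again before the next poll arrives.
        return new TimeValue(metricsInterval.getMillis() / 2); // 10s default -> 5s expiry
    }
}
```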
--- docs/changelog/104460.yaml | 5 +++++ .../monitor/metrics/NodeMetrics.java | 19 +++++++++++-------- .../elasticsearch/node/NodeConstruction.java | 4 +++- 3 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 docs/changelog/104460.yaml diff --git a/docs/changelog/104460.yaml b/docs/changelog/104460.yaml new file mode 100644 index 0000000000000..c92acdd5cb8ad --- /dev/null +++ b/docs/changelog/104460.yaml @@ -0,0 +1,5 @@ +pr: 104460 +summary: Dynamically adjust node metrics cache expiry +area: Search +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java index 807844d983135..8874c43c919ca 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java @@ -37,17 +37,23 @@ public class NodeMetrics extends AbstractLifecycleComponent { private final NodeService nodeService; private final List metrics; private NodeStatsCache stats; + private final TimeValue cacheExpiry; /** * Constructs a new NodeMetrics instance. * - * @param meterRegistry The MeterRegistry used to register metrics. - * @param nodeService The NodeService for interacting with the Elasticsearch node and extracting statistics. - */ - public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService) { + * @param meterRegistry The MeterRegistry used to register metrics. + * @param nodeService The NodeService for interacting with the Elasticsearch node and extracting statistics. + * @param metricsInterval The interval at which the agent sends metrics to the APM Server + */ + public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService, TimeValue metricsInterval) { this.registry = meterRegistry; this.nodeService = nodeService; this.metrics = new ArrayList<>(17); + // we set the cache to expire after half the interval at which the agent sends + // metrics to the APM Server, so that the cache updates at most once during a + // poll period and expires before the next poll period begins + this.cacheExpiry = new TimeValue(metricsInterval.getMillis() / 2); } /** @@ -57,10 +63,7 @@ public NodeMetrics(MeterRegistry meterRegistry, NodeService nodeService) { * @param registry The MeterRegistry used to register and collect metrics. */ private void registerAsyncMetrics(MeterRegistry registry) { - // Agent should poll stats every 4 minutes and being this cache is lazy we need a - // number high enough so that the cache does not update during the same poll - // period and that expires before a new poll period, therefore we choose 1 minute.
- this.stats = new NodeStatsCache(TimeValue.timeValueMinutes(1)); + this.stats = new NodeStatsCache(cacheExpiry); metrics.add( registry.registerLongAsyncCounter( "es.indices.get.total", diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index aa62ea689a5a9..d61d09cdac498 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -78,6 +78,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.env.Environment; @@ -966,7 +967,8 @@ record PluginServiceInstances( repositoryService ); - final NodeMetrics nodeMetrics = new NodeMetrics(telemetryProvider.getMeterRegistry(), nodeService); + final TimeValue metricsInterval = settings.getAsTime("tracing.apm.agent.metrics_interval", TimeValue.timeValueSeconds(10)); + final NodeMetrics nodeMetrics = new NodeMetrics(telemetryProvider.getMeterRegistry(), nodeService, metricsInterval); final SearchService searchService = serviceProvider.newSearchService( pluginsService, From e4feaff900e259f6accd3ea3756652fe5f0a594d Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 17 Jan 2024 11:32:46 -0500 Subject: [PATCH 86/95] Add support for more than one inner_hit when searching nested vectors (#104006) This commit adds the ability to gather more than one inner_hit when searching nested kNN. # Global kNN example ``` POST test/_search { "_source": false, "fields": [ "name" ], "knn": { "field": "nested.vector", "query_vector": [ -0.5, 90, -10, 14.8, -156 ], "k": 3, "num_candidates": 3, "inner_hits": { "size": 2, "fields": [ "nested.paragraph_id" ], "_source": false } } } ``` Results in
    ``` { "took": 66, "timed_out": false, "_shards": { "total": 2, "successful": 2, "skipped": 0, "failed": 0 }, "hits": { "total": { "value": 2, "relation": "eq" }, "max_score": 0.009090909, "hits": [ { "_index": "test", "_id": "2", "_score": 0.009090909, "fields": { "name": [ "moose.jpg" ] }, "inner_hits": { "nested": { "hits": { "total": { "value": 2, "relation": "eq" }, "max_score": 0.009090909, "hits": [ { "_index": "test", "_id": "2", "_nested": { "field": "nested", "offset": 0 }, "_score": 0.009090909, "fields": { "nested": [ { "paragraph_id": [ "0" ] } ] } }, { "_index": "test", "_id": "2", "_nested": { "field": "nested", "offset": 1 }, "_score": 0.004968944, "fields": { "nested": [ { "paragraph_id": [ "2" ] } ] } } ] } } } }, { "_index": "test", "_id": "3", "_score": 0.0021519717, "fields": { "name": [ "rabbit.jpg" ] }, "inner_hits": { "nested": { "hits": { "total": { "value": 1, "relation": "eq" }, "max_score": 0.0021519717, "hits": [ { "_index": "test", "_id": "3", "_nested": { "field": "nested", "offset": 0 }, "_score": 0.0021519717, "fields": { "nested": [ { "paragraph_id": [ "0" ] } ] } } ] } } } } ] } } ```
    # kNN Query example With a kNN query, this opens an interesting door, which allows for multiple inner_hit scoring schemes. ## Nearest by max passage only ``` POST test/_search { "size": 3, "query": { "nested": { "path": "nested", "score_mode": "max", "query": { "knn": { "field": "nested.vector", "query_vector": [ -0.5, 90, -10, 14.8, -156 ], "num_candidates": 5 } }, "inner_hits": { "size": 2, "_source": false, "fields": [ "nested.paragraph_id" ] } } } } ``` closes: https://github.com/elastic/elasticsearch/issues/102950 --- docs/changelog/104006.yaml | 5 + .../search-your-data/knn-search.asciidoc | 10 +- .../search.vectors/100_knn_nested_search.yml | 170 +++++++++++++++++- .../130_knn_query_nested_search.yml | 110 +++++++++++- .../search/nested/VectorNestedIT.java | 6 +- .../org/elasticsearch/TransportVersions.java | 1 + .../action/search/DfsQueryPhase.java | 6 +- .../vectors/DenseVectorFieldMapper.java | 72 +++++++- .../index/query/AbstractQueryBuilder.java | 14 ++ .../index/query/InnerHitsRewriteContext.java | 35 ++++ .../index/query/QueryRewriteContext.java | 4 + .../elasticsearch/search/SearchModule.java | 4 + .../elasticsearch/search/SearchService.java | 11 +- .../elasticsearch/search/dfs/DfsPhase.java | 4 + .../search/vectors/ExactKnnQueryBuilder.java | 116 ++++++++++++ .../vectors/KnnScoreDocQueryBuilder.java | 47 ++++- .../search/vectors/KnnSearchBuilder.java | 8 + .../search/vectors/KnnVectorQueryBuilder.java | 4 + .../action/search/DfsQueryPhaseTests.java | 19 +- .../vectors/DenseVectorFieldTypeTests.java | 63 +++++++ .../index/query/NestedQueryBuilderTests.java | 49 +++++ .../search/SearchModuleTests.java | 1 + .../vectors/ExactKnnQueryBuilderTests.java | 122 +++++++++++++ .../vectors/KnnScoreDocQueryBuilderTests.java | 53 +++++- 24 files changed, 908 insertions(+), 26 deletions(-) create mode 100644 docs/changelog/104006.yaml create mode 100644 server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java create mode 100644 server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java create mode 100644 server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java diff --git a/docs/changelog/104006.yaml b/docs/changelog/104006.yaml new file mode 100644 index 0000000000000..d840502cdefbe --- /dev/null +++ b/docs/changelog/104006.yaml @@ -0,0 +1,5 @@ +pr: 104006 +summary: Add support for more than one `inner_hit` when searching nested vectors +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/reference/search/search-your-data/knn-search.asciidoc b/docs/reference/search/search-your-data/knn-search.asciidoc index a847d9a306b7c..a68cacec8c10c 100644 --- a/docs/reference/search/search-your-data/knn-search.asciidoc +++ b/docs/reference/search/search-your-data/knn-search.asciidoc @@ -821,9 +821,6 @@ Now we have filtered based on the top level `"creation_time"` and only one docum Additionally, if you wanted to extract the nearest passage for a matched document, you can supply <> to the `knn` clause. -NOTE: `inner_hits` for kNN will only ever return a single hit, the nearest passage vector. -Setting `"size"` to any value greater than `1` will have no effect on the results. - NOTE: When using `inner_hits` and multiple `knn` clauses, be sure to specify the <> field. Otherwise, a naming clash can occur and fail the search request. 
@@ -848,7 +845,8 @@ POST passage_vectors/_search "_source": false, "fields": [ "paragraph.text" - ] + ], + "size": 1 } } } @@ -891,7 +889,7 @@ Now the result will contain the nearest found paragraph when searching. "paragraph": { "hits": { "total": { - "value": 1, + "value": 2, "relation": "eq" }, "max_score": 1.0, @@ -935,7 +933,7 @@ Now the result will contain the nearest found paragraph when searching. "paragraph": { "hits": { "total": { - "value": 1, + "value": 2, "relation": "eq" }, "max_score": 0.9997144, diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml index f44461e7b8143..c69e22d274c8e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/100_knn_nested_search.yml @@ -6,6 +6,9 @@ setup: indices.create: index: test body: + settings: + index: + number_of_shards: 2 mappings: properties: name: @@ -135,6 +138,172 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - match: {hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0"} --- +"nested kNN search inner_hits size > 1": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + + - do: + index: + index: test + id: "4" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "5" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "6" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 3} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + + - match: { hits.hits.0.fields.name.0: "moose.jpg" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 5 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 5} + # All these initial matches are "moose.jpg", which has 3 nested vectors, but two are closest + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.1.fields.name.0: 
"moose.jpg"} + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.1.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.2.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.3.fields.name.0: "moose.jpg"} + - length: { hits.hits.3.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.3.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.3.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + # Rabbit only has one passage vector + - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} + - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 3 + num_candidates: 3 + filter: {term: {name: "cow.jpg"}} + inner_hits: {size: 3, fields: ["nested.paragraph_id"], _source: false} + + - match: {hits.total.value: 1} + - match: { hits.hits.0._id: "1" } + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "1" } +--- +"nested kNN search inner_hits & boosting": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + features: close_to + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + + - close_to: { hits.hits.0._score: {value: 0.00909090, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 0.00909090, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 0.0021519717, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 0.0021519717, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 0.00001, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 0.00001, error: 0.00001} } + + - do: + search: + index: test + body: + fields: [ "name" ] + knn: + field: nested.vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 3 + num_candidates: 5 + boost: 2 + inner_hits: {size: 2, fields: ["nested.paragraph_id"], _source: false} + - close_to: { hits.hits.0._score: {value: 0.0181818, error: 0.00001} } + - close_to: { hits.hits.0.inner_hits.nested.hits.hits.0._score: {value: 0.0181818, error: 0.00001} } + - close_to: { hits.hits.1._score: {value: 0.0043039434, error: 0.00001} } + - close_to: { hits.hits.1.inner_hits.nested.hits.hits.0._score: {value: 0.0043039434, error: 0.00001} } + - close_to: { hits.hits.2._score: {value: 0.00002, error: 0.00001} } + - close_to: { hits.hits.2.inner_hits.nested.hits.hits.0._score: {value: 0.00002, error: 0.00001} } +--- "nested kNN search inner_hits & profiling": - skip: version: ' - 8.12.99' @@ -144,7 +313,6 @@ setup: index: test body: profile: true - _source: false fields: [ "name" ] knn: field: nested.vector diff --git 
a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml index 435291b454d08..5d07c0c8b5f9d 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/130_knn_query_nested_search.yml @@ -186,7 +186,6 @@ setup: - match: {hits.hits.0.fields.name.0: "rabbit.jpg"} - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } --- - "nested kNN search post-filtered on nested fields DOES NOT work": - do: search: @@ -211,3 +210,112 @@ setup: # TODO: fix it on Lucene level so nested knn respects num_candidates # or do pre-filtering - match: {hits.total.value: 0} +--- +"nested kNN search inner_hits size > 1": + - skip: + version: ' - 8.12.99' + reason: 'inner_hits on nested kNN search added in 8.13' + + - do: + index: + index: test + id: "4" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "5" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + + - do: + index: + index: test + id: "6" + body: + name: moose.jpg + nested: + - paragraph_id: 0 + vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + - paragraph_id: 2 + vector: [ 0, 100.0, 0, 14.8, -156.0 ] + - paragraph_id: 3 + vector: [ 0, 1.0, 0, 1.8, -15.0 ] + - do: + indices.refresh: { } + + - do: + search: + index: test + size: 3 + body: + fields: [ "name" ] + query: + nested: + path: nested + query: + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + num_candidates: 5 + inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } + + - match: {hits.total.value: 5} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + + + - do: + search: + index: test + size: 5 + body: + fields: [ "name" ] + query: + nested: + path: nested + query: + knn: + field: nested.vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + num_candidates: 5 + inner_hits: { size: 2, "fields": [ "nested.paragraph_id" ], _source: false } + + - match: {hits.total.value: 5} + # All these initial matches are "moose.jpg", which has 3 nested vectors, but two are closest + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - length: { hits.hits.0.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.0.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.1.fields.name.0: "moose.jpg"} + - length: { hits.hits.1.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.1.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.2.fields.name.0: "moose.jpg"} + - length: { hits.hits.2.inner_hits.nested.hits.hits: 2 } + - match: { 
hits.hits.2.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.2.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + - match: {hits.hits.3.fields.name.0: "moose.jpg"} + - length: { hits.hits.3.inner_hits.nested.hits.hits: 2 } + - match: { hits.hits.3.inner_hits.nested.hits.hits.0.fields.nested.0.paragraph_id.0: "0" } + - match: { hits.hits.3.inner_hits.nested.hits.hits.1.fields.nested.0.paragraph_id.0: "2" } + # Rabbit only has one passage vector + - match: {hits.hits.4.fields.name.0: "rabbit.jpg"} + - length: { hits.hits.4.inner_hits.nested.hits.hits: 1 } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java index 3dd9e68cf08af..f830ca9ac0cb6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/nested/VectorNestedIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.query.InnerHitBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.test.ESIntegTestCase; @@ -66,8 +67,9 @@ public void testSimpleNested() throws Exception { refresh(); assertResponse( - prepareSearch("test").setKnnSearch(List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null))) - .setAllowPartialSearchResults(false), + prepareSearch("test").setKnnSearch( + List.of(new KnnSearchBuilder("nested.vector", new float[] { 1, 1, 1 }, 1, 1, null).innerHit(new InnerHitBuilder())) + ).setAllowPartialSearchResults(false), response -> assertThat(response.getHits().getHits().length, greaterThan(0)) ); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fb83ecd51d59f..a730587f32c20 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -187,6 +187,7 @@ static TransportVersion def(int id) { public static final TransportVersion DATE_HISTOGRAM_SUPPORT_DOWNSAMPLED_TZ = def(8_574_00_0); public static final TransportVersion PEERFINDER_REPORTS_PEERS_MASTERS = def(8_575_00_0); public static final TransportVersion ESQL_MULTI_CLUSTERS_ENRICH = def(8_576_00_0); + public static final TransportVersion NESTED_KNN_MORE_INNER_HITS = def(8_577_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index 77f1931f62537..0c9d6ba12a27a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -151,7 +151,11 @@ ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { } scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc)); String nestedPath = dfsKnnResults.getNestedPath(); - QueryBuilder query = new KnnScoreDocQueryBuilder(scoreDocs.toArray(new ScoreDoc[0])); + QueryBuilder query = new KnnScoreDocQueryBuilder( + scoreDocs.toArray(new ScoreDoc[0]), + source.knnSearch().get(i).getField(), + source.knnSearch().get(i).getQueryVector() + ).boost(source.knnSearch().get(i).boost()); if (nestedPath != null) { query = new NestedQueryBuilder(nestedPath, query, ScoreMode.Max).innerHit(source.knnSearch().get(i).innerHit()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 27424d4591ba6..f165361ded105 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -29,6 +29,15 @@ import org.apache.lucene.index.SegmentWriteState; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.ByteKnnVectorFieldSource; +import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; +import org.apache.lucene.queries.function.valuesource.ConstKnnByteVectorValueSource; +import org.apache.lucene.queries.function.valuesource.ConstKnnFloatValueSource; +import org.apache.lucene.queries.function.valuesource.FloatKnnVectorFieldSource; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.Query; @@ -1063,6 +1072,67 @@ public Query createKnnQuery( return knnQuery; } + public Query createExactKnnQuery(float[] queryVector) { + if (isIndexed() == false) { + throw new IllegalArgumentException( + "to perform knn search on field [" + name() + "], its mapping must have [index] set to [true]" + ); + } + if (queryVector.length != dims) { + throw new IllegalArgumentException( + "the query vector has a different dimension [" + queryVector.length + "] than the index vectors [" + dims + "]" + ); + } + elementType.checkVectorBounds(queryVector); + if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); + if (similarity == VectorSimilarity.COSINE + && ElementType.FLOAT.equals(elementType) + && indexVersionCreated.onOrAfter(NORMALIZE_COSINE) + && isNotUnitVector(squaredMagnitude)) { + float length = (float) Math.sqrt(squaredMagnitude); + queryVector = Arrays.copyOf(queryVector, 
queryVector.length); + for (int i = 0; i < queryVector.length; i++) { + queryVector[i] /= length; + } + } + } + VectorSimilarityFunction vectorSimilarityFunction = similarity.vectorSimilarityFunction(indexVersionCreated, elementType); + return switch (elementType) { + case BYTE -> { + byte[] bytes = new byte[queryVector.length]; + for (int i = 0; i < queryVector.length; i++) { + bytes[i] = (byte) queryVector[i]; + } + yield new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new ByteVectorSimilarityFunction( + vectorSimilarityFunction, + new ByteKnnVectorFieldSource(name()), + new ConstKnnByteVectorValueSource(bytes) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + } + case FLOAT -> new BooleanQuery.Builder().add(new FieldExistsQuery(name()), BooleanClause.Occur.FILTER) + .add( + new FunctionQuery( + new FloatVectorSimilarityFunction( + vectorSimilarityFunction, + new FloatKnnVectorFieldSource(name()), + new ConstKnnFloatValueSource(queryVector) + ) + ), + BooleanClause.Occur.SHOULD + ) + .build(); + }; + } + public Query createKnnQuery( float[] queryVector, int numCands, @@ -1082,7 +1152,6 @@ public Query createKnnQuery( ); } elementType.checkVectorBounds(queryVector); - if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); elementType.checkVectorMagnitude(similarity, ElementType.errorFloatElementsAppender(queryVector), squaredMagnitude); @@ -1110,6 +1179,7 @@ && isNotUnitVector(squaredMagnitude)) { case FLOAT -> parentFilter != null ? new ProfilingDiversifyingChildrenFloatKnnVectorQuery(name(), queryVector, filter, numCands, parentFilter) : new ProfilingKnnFloatVectorQuery(name(), queryVector, numCands, filter); + }; if (similarityThreshold != null) { diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index f86142ffbe862..d3d7b46d3d729 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -296,6 +296,10 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws if (queryRewriteContext == null) { return this; } + final InnerHitsRewriteContext ihrc = queryRewriteContext.convertToInnerHitsRewriteContext(); + if (ihrc != null) { + return doInnerHitsRewrite(ihrc); + } final CoordinatorRewriteContext crc = queryRewriteContext.convertToCoordinatorRewriteContext(); if (crc != null) { return doCoordinatorRewrite(crc); @@ -342,6 +346,16 @@ protected QueryBuilder doIndexMetadataRewrite(final QueryRewriteContext context) return this; } + /** + * Optional rewrite logic that allows for optimization for extracting inner hits + * @param context an {@link InnerHitsRewriteContext} instance + * @return A {@link QueryBuilder} representing the rewritten query optimized for inner hit extraction + * @throws IOException if an error occurs while rewriting the query + */ + protected QueryBuilder doInnerHitsRewrite(final InnerHitsRewriteContext context) throws IOException { + return this; + } + /** * For internal usage only! 
* diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java new file mode 100644 index 0000000000000..0b437fa451e1b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitsRewriteContext.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.index.query; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.xcontent.XContentParserConfiguration; + +import java.util.function.LongSupplier; + +/** + * Context object used to rewrite {@link QueryBuilder} instances into an optimized version for extracting inner_hits. + */ +public final class InnerHitsRewriteContext extends QueryRewriteContext { + public InnerHitsRewriteContext(final XContentParserConfiguration parserConfiguration, final LongSupplier nowInMillis) { + super(parserConfiguration, null, nowInMillis); + } + + @Override + public InnerHitsRewriteContext convertToInnerHitsRewriteContext() { + return this; + } + + @Override + @SuppressWarnings({ "rawtypes" }) + public void executeAsyncActions(ActionListener listener) { + // InnerHitsRewriteContext does not support async actions at all, and doesn't supply a valid `client` object + throw new UnsupportedOperationException("InnerHitsRewriteContext does not support async actions"); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java index 9a8800c05bdb2..e36c4d608d59f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryRewriteContext.java @@ -159,6 +159,10 @@ public DataRewriteContext convertToDataRewriteContext() { return null; } + public InnerHitsRewriteContext convertToInnerHitsRewriteContext() { + return null; + } + /** * Returns the {@link MappedFieldType} for the provided field name. * If the field is not mapped, the behaviour depends on the index.query.parse.allow_unmapped_fields setting, which defaults to true. 
diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 8ba48563c8f55..5b17203ded132 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -245,6 +245,7 @@ import org.elasticsearch.search.suggest.phrase.StupidBackoff; import org.elasticsearch.search.suggest.term.TermSuggestion; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.search.vectors.QueryVectorBuilder; @@ -1130,6 +1131,9 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(KnnScoreDocQueryBuilder.NAME, KnnScoreDocQueryBuilder::new, parser -> { throw new IllegalArgumentException("[score_doc] queries cannot be provided directly"); })); + registerQuery(new QuerySpec<>(ExactKnnQueryBuilder.NAME, ExactKnnQueryBuilder::new, parser -> { + throw new IllegalArgumentException("[exact_knn] queries cannot be provided directly"); + })); registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 8a03c7e9f08ba..d5b2565187a3f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -53,6 +53,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.CoordinatorRewriteContextProvider; import org.elasticsearch.index.query.InnerHitContextBuilder; +import org.elasticsearch.index.query.InnerHitsRewriteContext; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -1234,13 +1235,19 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc context.size(source.size()); Map innerHitBuilders = new HashMap<>(); QueryBuilder query = source.query(); + InnerHitsRewriteContext innerHitsRewriteContext = new InnerHitsRewriteContext( + context.getSearchExecutionContext().getParserConfig(), + context::getRelativeTimeInMillis + ); if (query != null) { - InnerHitContextBuilder.extractInnerHits(query, innerHitBuilders); + QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(query, innerHitsRewriteContext, true); + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); searchExecutionContext.setAliasFilter(context.request().getAliasFilter().getQueryBuilder()); context.parsedQuery(searchExecutionContext.toQuery(query)); } if (source.postFilter() != null) { - InnerHitContextBuilder.extractInnerHits(source.postFilter(), innerHitBuilders); + QueryBuilder rewrittenForInnerHits = Rewriteable.rewrite(source.postFilter(), innerHitsRewriteContext, true); + InnerHitContextBuilder.extractInnerHits(rewrittenForInnerHits, innerHitBuilders); context.parsedPostFilter(searchExecutionContext.toQuery(source.postFilter())); } if (innerHitBuilders.size() > 0) { diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java index 5d3288408c99b..dab127e8b4e56 100644 --- 
a/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsPhase.java @@ -42,6 +42,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; + /** * DFS phase of a search request, used to make scoring 100% accurate by collecting additional info from each shard before the query phase. * The additional information is used to better compare the scores coming from all the shards, which depend on local factors (e.g. idf). @@ -181,6 +183,8 @@ private static void executeKnnVectorQuery(SearchContext context) throws IOExcept SearchExecutionContext searchExecutionContext = context.getSearchExecutionContext(); List knnSearch = context.request().source().knnSearch(); List knnVectorQueryBuilders = knnSearch.stream().map(KnnSearchBuilder::toQueryBuilder).toList(); + // Since we apply boost during the DfsQueryPhase, we should not apply boost here: + knnVectorQueryBuilders.forEach(knnVectorQueryBuilder -> knnVectorQueryBuilder.boost(DEFAULT_BOOST)); if (context.request().getAliasFilter().getQueryBuilder() != null) { for (KnnVectorQueryBuilder knnVectorQueryBuilder : knnVectorQueryBuilders) { diff --git a/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java new file mode 100644 index 0000000000000..d292f61dcb085 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.search.Query; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +/** + * Exact knn query builder. Will iterate and score all documents that have the provided knn field in the index. + * Useful in inner hits scoring scenarios. + */ +public class ExactKnnQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "exact_knn"; + private final String field; + private final float[] query; + + /** + * Creates a query builder. 
+ * + * @param query the query vector + * @param field the field that was used for the kNN query + */ + public ExactKnnQueryBuilder(float[] query, String field) { + this.query = query; + this.field = field; + } + + public ExactKnnQueryBuilder(StreamInput in) throws IOException { + super(in); + this.query = in.readFloatArray(); + this.field = in.readString(); + } + + String getField() { + return field; + } + + float[] getQuery() { + return query; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeFloatArray(query); + out.writeString(field); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field("query", query); + builder.field("field", field); + boostAndQueryNameToXContent(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + final MappedFieldType fieldType = context.getFieldType(field); + if (fieldType == null) { + throw new IllegalArgumentException("field [" + field + "] does not exist in the mapping"); + } + if (fieldType instanceof DenseVectorFieldMapper.DenseVectorFieldType == false) { + throw new IllegalArgumentException( + "[" + NAME + "] queries are only supported on [" + DenseVectorFieldMapper.CONTENT_TYPE + "] fields" + ); + } + final DenseVectorFieldMapper.DenseVectorFieldType vectorFieldType = (DenseVectorFieldMapper.DenseVectorFieldType) fieldType; + return vectorFieldType.createExactKnnQuery(query); + } + + @Override + protected boolean doEquals(ExactKnnQueryBuilder other) { + return field.equals(other.field) && Arrays.equals(query, other.query); + } + + @Override + protected int doHashCode() { + return Objects.hash(field, Arrays.hashCode(query)); + } + + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException { + return this; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.NESTED_KNN_MORE_INNER_HITS; + } +}
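doToQuery above delegates to DenseVectorFieldType.createExactKnnQuery, whose shape (a BooleanQuery that scores through a FunctionQuery over a vector-similarity ValueSource) is what the field-type tests later in this patch assert. A hedged sketch of that construction for float vectors, using Lucene's function-query value sources; the FieldExistsQuery filter clause and the fixed COSINE similarity are illustrative assumptions, not the actual field-type code:

import org.apache.lucene.index.VectorSimilarityFunction;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.valuesource.ConstKnnFloatValueSource;
import org.apache.lucene.queries.function.valuesource.FloatKnnVectorFieldSource;
import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldExistsQuery;
import org.apache.lucene.search.Query;

final class ExactKnnQuerySketch {
    static Query exactKnnQuery(String field, float[] queryVector) {
        // Score each candidate by similarity(indexed vector, query vector).
        FloatVectorSimilarityFunction similarity = new FloatVectorSimilarityFunction(
            VectorSimilarityFunction.COSINE,            // whichever similarity the field is mapped with
            new FloatKnnVectorFieldSource(field),       // per-document indexed vector
            new ConstKnnFloatValueSource(queryVector)   // the constant query vector
        );
        return new BooleanQuery.Builder()
            // Only documents that actually carry the vector field can match ...
            .add(new FieldExistsQuery(field), BooleanClause.Occur.FILTER)
            // ... and every match is scored by the similarity function.
            .add(new FunctionQuery(similarity), BooleanClause.Occur.SHOULD)
            .build();
    }
}

Unlike the approximate HNSW-backed kNN query, this visits and scores every document with the field, which is exactly what per-parent inner-hit scoring needs.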
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java index 13ca1d3dc1db2..ea9b2df942808 100644 --- a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java @@ -35,6 +35,8 @@ public class KnnScoreDocQueryBuilder extends AbstractQueryBuilder<KnnScoreDocQueryBuilder> { public static final String NAME = "knn_score_doc"; private final ScoreDoc[] scoreDocs; + private final String fieldName; + private final float[] queryVector; /** * Creates a query builder. @@ -42,13 +44,26 @@ public class KnnScoreDocQueryBuilder extends AbstractQueryBuilder rewrittenQueries = new ArrayList<>(filterQueries.size()); for (QueryBuilder query : filterQueries) { @@ -260,6 +263,7 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { DenseVectorFieldType vectorFieldType = (DenseVectorFieldType) fieldType; String parentPath = context.nestedLookup().getNestedParent(fieldName); + if (parentPath != null) { NestedObjectMapper originalObjectMapper = context.nestedScope().getObjectMapper(); if (originalObjectMapper != null) { diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index 1736449f7cbdf..e9ff8336ef4c9 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -336,15 +336,28 @@ public void testRewriteShardSearchRequestWithRank() { QueryBuilder bm25 = new TermQueryBuilder("field", "term"); SearchSourceBuilder ssb = new SearchSourceBuilder().query(bm25) - .knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 0.0f }, 10, 100, null))) + .knnSearch( + List.of( + new KnnSearchBuilder("vector", new float[] { 0.0f }, 10, 100, null), + new KnnSearchBuilder("vector2", new float[] { 0.0f }, 10, 100, null) + ) + ) .rankBuilder(new TestRankBuilder(100)); SearchRequest sr = new SearchRequest().allowPartialSearchResults(true).source(ssb); ShardSearchRequest ssr = new ShardSearchRequest(null, sr, new ShardId("test", "testuuid", 1), 1, 1, null, 1.0f, 0, null); dqp.rewriteShardSearchRequest(ssr); - KnnScoreDocQueryBuilder ksdqb0 = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(1, 3.0f, 1), new ScoreDoc(4, 1.5f, 1) }); - KnnScoreDocQueryBuilder ksdqb1 = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(1, 2.0f, 1) }); + KnnScoreDocQueryBuilder ksdqb0 = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(1, 3.0f, 1), new ScoreDoc(4, 1.5f, 1) }, + "vector", + new float[] { 0.0f } + ); + KnnScoreDocQueryBuilder ksdqb1 = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(1, 2.0f, 1) }, + "vector2", + new float[] { 0.0f } + ); assertEquals( List.of(bm25, ksdqb0, ksdqb1), List.of( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index e43fa379054bf..c3d2d6a3f194b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -8,6 +8,11 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.ByteVectorSimilarityFunction; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.KnnByteVectorQuery; import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; @@ -158,6 +163,64 @@ public void testCreateNestedKnnQuery() { } } + public void testExactKnnQuery() { + int dims = randomIntBetween(2, 2048); + { + DenseVectorFieldType field = new DenseVectorFieldType( + "f", + IndexVersion.current(),
DenseVectorFieldMapper.ElementType.FLOAT, + dims, + true, + VectorSimilarity.COSINE, + Collections.emptyMap() + ); + float[] queryVector = new float[dims]; + for (int i = 0; i < dims; i++) { + queryVector[i] = randomFloat(); + } + Query query = field.createExactKnnQuery(queryVector); + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); + } + } + assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + { + DenseVectorFieldType field = new DenseVectorFieldType( + "f", + IndexVersion.current(), + DenseVectorFieldMapper.ElementType.BYTE, + dims, + true, + VectorSimilarity.COSINE, + Collections.emptyMap() + ); + byte[] queryVector = new byte[dims]; + float[] floatQueryVector = new float[dims]; + for (int i = 0; i < dims; i++) { + queryVector[i] = randomByte(); + floatQueryVector[i] = queryVector[i]; + } + Query query = field.createExactKnnQuery(floatQueryVector); + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof ByteVectorSimilarityFunction); + } + } + assertTrue("Unable to find ByteVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + } + public void testFloatCreateKnnQuery() { DenseVectorFieldType unindexedField = new DenseVectorFieldType( "f", diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 516f65111afca..137e0cb348a9c 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -28,8 +28,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.vectors.ExactKnnQueryBuilder; +import org.elasticsearch.search.vectors.KnnVectorQueryBuilder; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; import org.hamcrest.Matchers; import java.io.IOException; @@ -48,6 +52,9 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> { + private static final String VECTOR_FIELD = "vector"; + private static final int VECTOR_DIMENSION = 3; + @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge( @@ -76,6 +83,27 @@ protected void initializeAdditionalMappings(MapperService mapperService) throws ), MapperService.MergeReason.MAPPING_UPDATE ); + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .startObject("properties") + .startObject(VECTOR_FIELD) + .field("type", "dense_vector") + .field("dims", VECTOR_DIMENSION) + .field("index", true) + .field("similarity",
"cosine") + .endObject() + .endObject() + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); } /** @@ -233,6 +261,27 @@ public void testMustRewrite() throws IOException { assertEquals("Rewrite first", e.getMessage()); } + public void testKnnRewriteForInnerHits() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + context.setAllowUnmappedFields(true); + KnnVectorQueryBuilder innerQueryBuilder = new KnnVectorQueryBuilder( + "nested1." + VECTOR_FIELD, + new float[] { 1.0f, 2.0f, 3.0f }, + 1, + null + ); + NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder( + "nested1", + innerQueryBuilder, + RandomPicks.randomFrom(random(), ScoreMode.values()) + ); + InnerHitsRewriteContext rewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), context::nowInMillis); + QueryBuilder queryBuilder = Rewriteable.rewrite(nestedQueryBuilder, rewriteContext, true); + assertTrue(queryBuilder instanceof NestedQueryBuilder); + NestedQueryBuilder rewritten = (NestedQueryBuilder) queryBuilder; + assertTrue(rewritten.query() instanceof ExactKnnQueryBuilder); + } + public void testIgnoreUnmapped() throws IOException { final NestedQueryBuilder queryBuilder = new NestedQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None); queryBuilder.ignoreUnmapped(true); diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 57974cff0d03c..6a8ac3d1aa876 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -425,6 +425,7 @@ public CheckedBiConsumer getReque "combined_fields", "dis_max", "exists", + "exact_knn", "function_score", "fuzzy", "geo_bounding_box", diff --git a/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java new file mode 100644 index 0000000000000..02093d9fa0e44 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/vectors/ExactKnnQueryBuilderTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.vectors; + +import org.apache.lucene.queries.function.FunctionQuery; +import org.apache.lucene.queries.function.valuesource.FloatVectorSimilarityFunction; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.index.IndexVersions; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.AbstractQueryTestCase; +import org.elasticsearch.test.TestGeoShapeFieldMapperPlugin; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Locale; + +public class ExactKnnQueryBuilderTests extends AbstractQueryTestCase<ExactKnnQueryBuilder> { + + private static final String VECTOR_FIELD = "vector"; + private static final int VECTOR_DIMENSION = 3; + + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject(VECTOR_FIELD) + .field("type", "dense_vector") + .field("dims", VECTOR_DIMENSION) + .field("index", true) + .field("similarity", "cosine") + .endObject() + .endObject() + .endObject(); + mapperService.merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(Strings.toString(builder)), + MapperService.MergeReason.MAPPING_UPDATE + ); + } + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return List.of(TestGeoShapeFieldMapperPlugin.class); + } + + @Override + protected ExactKnnQueryBuilder doCreateTestQueryBuilder() { + float[] query = new float[VECTOR_DIMENSION]; + for (int i = 0; i < VECTOR_DIMENSION; i++) { + query[i] = randomFloat(); + } + return new ExactKnnQueryBuilder(query, VECTOR_FIELD); + } + + @Override + public void testValidOutput() { + ExactKnnQueryBuilder query = new ExactKnnQueryBuilder(new float[] { 1.0f, 2.0f, 3.0f }, "field"); + String expected = """ + { + "exact_knn" : { + "query" : [ + 1.0, + 2.0, + 3.0 + ], + "field" : "field" + } + }"""; + assertEquals(expected, query.toString()); + } + + @Override + protected void doAssertLuceneQuery(ExactKnnQueryBuilder queryBuilder, Query query, SearchExecutionContext context) throws IOException { + assertTrue(query instanceof BooleanQuery); + BooleanQuery booleanQuery = (BooleanQuery) query; + boolean foundFunction = false; + for (BooleanClause clause : booleanQuery) { + if (clause.getQuery() instanceof FunctionQuery functionQuery) { + foundFunction = true; + assertTrue(functionQuery.getValueSource() instanceof FloatVectorSimilarityFunction); + String description = functionQuery.getValueSource().description().toLowerCase(Locale.ROOT); + if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(IndexVersions.NORMALIZED_VECTOR_COSINE)) { + assertTrue(description, description.contains("dot_product")); + } else { + assertTrue(description, description.contains("cosine")); + } + } + } + assertTrue("Unable to find FloatVectorSimilarityFunction in created BooleanQuery", foundFunction); + } + + @Override + public void testUnknownObjectException() { + // Test isn't relevant, since query is never parsed from xContent + } + + @Override + public void testFromXContent() throws
IOException { + // Test isn't relevant, since query is never parsed from xContent + } + + @Override + public void testUnknownField() { + // Test isn't relevant, since query is never parsed from xContent + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java index eceafe6d12ac9..67bc6bde9c1af 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilderTests.java @@ -23,8 +23,10 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.elasticsearch.index.query.InnerHitsRewriteContext; import org.elasticsearch.index.query.MatchNoneQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractQueryTestCase; @@ -38,6 +40,7 @@ import java.util.List; import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; +import static org.elasticsearch.search.vectors.KnnSearchBuilderTests.randomVector; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -58,12 +61,20 @@ protected KnnScoreDocQueryBuilder doCreateTestQueryBuilder() { for (int doc = 0; doc < numDocs; doc++) { scoreDocs.add(new ScoreDoc(doc, randomFloat())); } - return new KnnScoreDocQueryBuilder(scoreDocs.toArray(new ScoreDoc[0])); + return new KnnScoreDocQueryBuilder( + scoreDocs.toArray(new ScoreDoc[0]), + randomBoolean() ? "field" : null, + randomBoolean() ? randomVector(10) : null + ); } @Override public void testValidOutput() { - KnnScoreDocQueryBuilder query = new KnnScoreDocQueryBuilder(new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }); + KnnScoreDocQueryBuilder query = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, + "field", + new float[] { 1.0f, 2.0f } + ); String expected = """ { "knn_score_doc" : { @@ -76,6 +87,11 @@ public void testValidOutput() { "doc" : 5, "score" : 1.6 } + ], + "field" : "field", + "query" : [ + 1.0, + 2.0 ] } }"""; @@ -144,11 +160,36 @@ public void testMustRewrite() throws IOException { } public void testRewriteToMatchNone() throws IOException { - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(new ScoreDoc[0]); - SearchExecutionContext context = createSearchExecutionContext(); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + new ScoreDoc[0], + randomBoolean() ? "field" : null, + randomBoolean() ? randomVector(10) : null + ); + QueryRewriteContext context = randomBoolean() + ? 
new InnerHitsRewriteContext(createSearchExecutionContext().getParserConfig(), System::currentTimeMillis) + : createSearchExecutionContext(); assertEquals(new MatchNoneQueryBuilder(), queryBuilder.rewrite(context)); } + public void testRewriteForInnerHits() throws IOException { + SearchExecutionContext context = createSearchExecutionContext(); + InnerHitsRewriteContext innerHitsRewriteContext = new InnerHitsRewriteContext(context.getParserConfig(), System::currentTimeMillis); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder( + new ScoreDoc[] { new ScoreDoc(0, 4.25f), new ScoreDoc(5, 1.6f) }, + randomAlphaOfLength(10), + randomVector(10) + ); + queryBuilder.boost(randomFloat()); + queryBuilder.queryName(randomAlphaOfLength(10)); + QueryBuilder rewritten = queryBuilder.rewrite(innerHitsRewriteContext); + assertTrue(rewritten instanceof ExactKnnQueryBuilder); + ExactKnnQueryBuilder exactKnnQueryBuilder = (ExactKnnQueryBuilder) rewritten; + assertEquals(queryBuilder.queryVector(), exactKnnQueryBuilder.getQuery()); + assertEquals(queryBuilder.fieldName(), exactKnnQueryBuilder.getField()); + assertEquals(queryBuilder.boost(), exactKnnQueryBuilder.boost(), 0.0001f); + assertEquals(queryBuilder.queryName(), exactKnnQueryBuilder.queryName()); + } + @Override public void testUnknownObjectException() { // Test isn't relevant, since query is never parsed from xContent @@ -185,7 +226,7 @@ public void testScoreDocQueryWeightCount() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0f); for (LeafReaderContext leafReaderContext : searcher.getLeafContexts()) { @@ -228,7 +269,7 @@ public void testScoreDocQuery() throws IOException { } ScoreDoc[] scoreDocs = scoreDocsList.toArray(new ScoreDoc[0]); - KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs); + KnnScoreDocQueryBuilder queryBuilder = new KnnScoreDocQueryBuilder(scoreDocs, "field", randomVector(10)); final Query query = queryBuilder.doToQuery(context); final Weight w = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0f);
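testRewriteForInnerHits above pins down the rewrite contract: under an InnerHitsRewriteContext, a KnnScoreDocQueryBuilder collapses into an ExactKnnQueryBuilder carrying the same vector, field, boost and query name, while an empty ScoreDoc array still rewrites to match_none regardless of context. A sketch of a doRewrite hook satisfying those tests; simplified and assumed, not the verbatim implementation:

// Sketch of KnnScoreDocQueryBuilder#doRewrite as implied by the tests above.
@Override
protected QueryBuilder doRewrite(QueryRewriteContext ctx) throws IOException {
    // No hits survived the kNN phase: nothing can match, whatever the context.
    if (scoreDocs.length == 0) {
        return new MatchNoneQueryBuilder();
    }
    // Inner hits re-score nested documents per parent, so shard-global ScoreDocs
    // are useless there; fall back to exact, iterate-and-score semantics.
    if (ctx.convertToInnerHitsRewriteContext() != null && fieldName != null && queryVector != null) {
        return new ExactKnnQueryBuilder(queryVector, fieldName).boost(boost()).queryName(queryName());
    }
    return this;
}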
From 9b68f3c267091397dbb1223de7fb1c2ec665bff3 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 17 Jan 2024 11:26:21 -0800 Subject: [PATCH 87/95] Upgrade bundled JDK to 21.0.2 (#104481) --- build-tools-internal/version.properties | 2 +- docs/changelog/104481.yaml | 6 ++++ gradle/verification-metadata.xml | 47 +++++++------------------ 3 files changed, 20 insertions(+), 35 deletions(-) create mode 100644 docs/changelog/104481.yaml diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index a76f507079f2f..1e0b7de03b340 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -2,7 +2,7 @@ elasticsearch = 8.13.0 lucene = 9.9.1 bundled_jdk_vendor = openjdk -bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d +bundled_jdk = 21.0.2+13@f2283984656d49d69e91c558476027ac # optional dependencies spatial4j = 0.7 jts = 1.15.0 diff --git a/docs/changelog/104481.yaml b/docs/changelog/104481.yaml new file mode 100644 index 0000000000000..5377efdc7109e --- /dev/null +++ b/docs/changelog/104481.yaml @@ -0,0 +1,6 @@ +pr: 104481 +summary: Upgrade bundled JDK to 21.0.2 +area: Packaging +type: upgrade +issues: + - 4449 diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 24b81106dcea3..163221315280b 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -1689,46 +1689,25 @@ [XML element content lost in extraction: this hunk replaces the openjdk 21.0.1+12 component and artifact checksum entries with the corresponding 21.0.2+13 entries] From 7d00d0667138c52f3287ae058c29e0ae71480f4d Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 17 Jan 2024 14:25:16 -0500 Subject: [PATCH 88/95] Bump versions after 8.12.0 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 12 ++++++------ .buildkite/pipelines/periodic.yml | 12 ++++++------ .ci/bwcVersions | 2 +- .ci/snapshotBwcVersions | 3 +-- server/src/main/java/org/elasticsearch/Version.java | 3 ++- 6 files changed, 17 insertions(+), 17 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index c80cd99067743..6b4e238e6e0f8 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.17", "8.11.5", "8.12.0", "8.13.0"] + BWC_VERSION: ["7.17.17", "8.12.1", "8.13.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 289139bee61b0..67c352f21d62b 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1793,8 +1793,8 @@ steps: env: BWC_VERSION: 8.11.4 - - label: "{{matrix.image}} / 8.11.5 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.5 + - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 timeout_in_minutes: 300 matrix: setup: @@ -1807,10 +1807,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.5 + BWC_VERSION: 8.12.0 - - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 + - label: "{{matrix.image}} / 8.12.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.1 timeout_in_minutes: 300 matrix: setup: @@ -1823,7 +1823,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.0 + BWC_VERSION: 8.12.1 - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 6e8dc5e5265b3..efd47fc74dd0d 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1102,8 +1102,8 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.4 - - label: 8.11.5 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.5#bwcTest + - label: 8.12.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: -
BWC_VERSION: 8.12.0 + - label: 8.12.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp image: family/elasticsearch-ubuntu-2004 machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.12.0 + BWC_VERSION: 8.12.1 - label: 8.13.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest timeout_in_minutes: 300 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index f5c724dd4312c..97bce22156c6b 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -109,6 +109,6 @@ BWC_VERSION: - "8.11.2" - "8.11.3" - "8.11.4" - - "8.11.5" - "8.12.0" + - "8.12.1" - "8.13.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 9329a13bc7411..1d509c90d999b 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - "7.17.17" - - "8.11.5" - - "8.12.0" + - "8.12.1" - "8.13.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 126893bc36274..781e3f1398e32 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -160,8 +160,9 @@ public class Version implements VersionId<Version>, ToXContentFragment { public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_11_3 = new Version(8_11_03_99); public static final Version V_8_11_4 = new Version(8_11_04_99); - public static final Version V_8_11_5 = new Version(8_11_05_99); public static final Version V_8_12_0 = new Version(8_12_00_99); + public static final Version V_8_12_1 = new Version(8_12_01_99); + + public static final Version V_8_13_0 = new Version(8_13_00_99); public static final Version CURRENT = V_8_13_0;
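The Version.java hunk above is easier to review once the id literals are decoded: the underscores are plain Java digit separators, so 8_12_01_99 is the int 8120199, packing major, minor and revision into fixed decimal slots. A worked example of that packing follows; treating the trailing 99 as a release marker is an inference from the pattern of the existing constants, not documented behavior:

// versionId(8, 12, 1) == 8_120_199 == 8_12_01_99, i.e. the id behind V_8_12_1.
static int versionId(int major, int minor, int revision) {
    return major * 1_000_000 + minor * 10_000 + revision * 100 + 99;
}

This is why the bump commit only has to add one constant per new release line: the ordering of ids falls out of the arithmetic.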
From a83eed4850945f247bc9c3a0cbb8b203e80d6d65 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 17 Jan 2024 14:28:44 -0500 Subject: [PATCH 89/95] Prune changelogs after 8.12.0 release --- docs/changelog/100031.yaml | 5 ----- docs/changelog/100033.yaml | 9 --------- docs/changelog/100236.yaml | 5 ----- docs/changelog/100287.yaml | 5 ----- docs/changelog/100316.yaml | 6 ------ docs/changelog/100333.yaml | 5 ----- docs/changelog/100368.yaml | 5 ----- docs/changelog/100383.yaml | 5 ----- docs/changelog/100392.yaml | 5 ----- docs/changelog/100408.yaml | 5 ----- docs/changelog/100466.yaml | 5 ----- docs/changelog/100492.yaml | 6 ------ docs/changelog/100519.yaml | 5 ----- docs/changelog/100565.yaml | 5 ----- docs/changelog/100570.yaml | 5 ----- docs/changelog/100609.yaml | 5 ----- docs/changelog/100642.yaml | 6 ------ docs/changelog/100646.yaml | 5 ----- docs/changelog/100776.yaml | 6 ------ docs/changelog/100826.yaml | 7 ------- docs/changelog/100828.yaml | 5 ----- docs/changelog/100862.yaml | 5 ----- docs/changelog/100899.yaml | 5 ----- docs/changelog/100921.yaml | 5 ----- docs/changelog/100938.yaml | 5 ----- docs/changelog/100974.yaml | 5 ----- docs/changelog/100990.yaml | 5 ----- docs/changelog/101024.yaml | 5 ----- docs/changelog/101026.yaml | 5 ----- docs/changelog/101032.yaml | 5 ----- docs/changelog/101050.yaml | 5 ----- docs/changelog/101055.yaml | 5 ----- docs/changelog/101057.yaml | 5 ----- docs/changelog/101066.yaml | 6 ------ docs/changelog/101093.yaml | 6 ------ docs/changelog/101126.yaml | 5 ----- docs/changelog/101147.yaml | 5 ----- docs/changelog/101148.yaml | 6 ------ docs/changelog/101185.yaml | 5 ----- docs/changelog/101202.yaml | 5 ----- docs/changelog/101230.yaml | 12 ------------ docs/changelog/101235.yaml | 5 ----- docs/changelog/101311.yaml | 5 ----- docs/changelog/101333.yaml | 29 ----------------------------- docs/changelog/101346.yaml | 5 ----- docs/changelog/101383.yaml | 5 ----- docs/changelog/101385.yaml | 6 ------ docs/changelog/101390.yaml | 5 ----- docs/changelog/101392.yaml | 5 ----- docs/changelog/101396.yaml | 5 ----- docs/changelog/101409.yaml | 5 ----- docs/changelog/101423.yaml | 5 ----- docs/changelog/101426.yaml | 5 ----- docs/changelog/101457.yaml | 14 -------------- docs/changelog/101474.yaml | 5 ----- docs/changelog/101488.yaml | 5 ----- docs/changelog/101518.yaml | 6 ------ docs/changelog/101535.yaml | 5 ----- docs/changelog/101577.yaml | 5 ----- docs/changelog/101585.yaml | 6 ------ docs/changelog/101607.yaml | 5 ----- docs/changelog/101609.yaml | 9 --------- docs/changelog/101660.yaml | 6 ------ docs/changelog/101682.yaml | 5 ----- docs/changelog/101700.yaml | 5 ----- docs/changelog/101723.yaml | 6 ------ docs/changelog/101727.yaml | 5 ----- docs/changelog/101753.yaml | 5 ----- docs/changelog/101788.yaml | 6 ------ docs/changelog/101802.yaml | 5 ----- docs/changelog/101815.yaml | 5 ----- docs/changelog/101826.yaml | 6 ------ docs/changelog/101845.yaml | 5 ----- docs/changelog/101846.yaml | 5 ----- docs/changelog/101847.yaml | 6 ------ docs/changelog/101859.yaml | 6 ------ docs/changelog/101868.yaml | 5 ----- docs/changelog/101904.yaml | 5 ----- docs/changelog/101979.yaml | 5 ----- docs/changelog/101989.yaml | 5 ----- docs/changelog/102020.yaml | 5 ----- docs/changelog/102032.yaml | 5 ----- docs/changelog/102048.yaml | 5 ----- docs/changelog/102051.yaml | 5 ----- docs/changelog/102056.yaml | 5 ----- docs/changelog/102065.yaml | 5 ----- docs/changelog/102075.yaml | 5 ----- docs/changelog/102089.yaml | 5 ----- docs/changelog/102093.yaml | 14 -------------- docs/changelog/102138.yaml | 5 ----- docs/changelog/102140.yaml | 6 ------ docs/changelog/102165.yaml | 6 ------ docs/changelog/102172.yaml | 5 ----- docs/changelog/102177.yaml | 5 ----- docs/changelog/102183.yaml | 13 ------------- docs/changelog/102184.yaml | 5 ----- docs/changelog/102188.yaml | 5 ----- docs/changelog/102190.yaml | 5 ----- docs/changelog/102192.yaml | 5 ----- docs/changelog/102193.yaml | 5 ----- docs/changelog/102208.yaml | 5 ----- docs/changelog/102244.yaml | 5 ----- docs/changelog/102245.yaml | 5 ----- docs/changelog/102248.yaml | 5 ----- docs/changelog/102273.yaml | 5 ----- docs/changelog/102292.yaml | 5 ----- docs/changelog/102317.yaml | 6 ------ docs/changelog/102350.yaml | 6 ------ docs/changelog/102379.yaml | 6 ------ docs/changelog/102388.yaml | 6 ------ docs/changelog/102391.yaml | 5 ----- docs/changelog/102417.yaml | 6 ------ docs/changelog/102426.yaml | 5 ----- docs/changelog/102434.yaml | 5 ----- docs/changelog/102447.yaml | 6 ------ docs/changelog/102456.yaml | 6 ------ docs/changelog/102461.yaml | 5 ----- docs/changelog/102462.yaml | 5 ----- docs/changelog/102472.yaml | 5 ----- docs/changelog/102476.yaml | 5 ----- docs/changelog/102490.yaml | 6 ------ docs/changelog/102495.yaml | 6 ------ docs/changelog/102510.yaml | 7 ------- docs/changelog/102511.yaml | 5 ----- docs/changelog/102512.yaml | 6 ------ docs/changelog/102562.yaml | 5 ----- docs/changelog/102570.yaml | 5 ----- docs/changelog/102571.yaml | 5 ----- docs/changelog/102598.yaml | 5 ----- docs/changelog/102602.yaml | 5 ----- docs/changelog/102612.yaml | 5 ----- docs/changelog/102636.yaml | 5 ----- docs/changelog/102637.yaml | 5 ----- docs/changelog/102644.yaml | 5 ----- docs/changelog/102673.yaml | 5 ----- docs/changelog/102680.yaml
| 5 ----- docs/changelog/102682.yaml | 5 ----- docs/changelog/102710.yaml | 5 ----- docs/changelog/102713.yaml | 5 ----- docs/changelog/102727.yaml | 5 ----- docs/changelog/102731.yaml | 5 ----- docs/changelog/102735.yaml | 5 ----- docs/changelog/102740.yaml | 5 ----- docs/changelog/102767.yaml | 6 ------ docs/changelog/102806.yaml | 5 ----- docs/changelog/102808.yaml | 6 ------ docs/changelog/102810.yaml | 5 ----- docs/changelog/102811.yaml | 6 ------ docs/changelog/102832.yaml | 5 ----- docs/changelog/102840.yaml | 5 ----- docs/changelog/102844.yaml | 5 ----- docs/changelog/102877.yaml | 5 ----- docs/changelog/102888.yaml | 5 ----- docs/changelog/102901.yaml | 5 ----- docs/changelog/102902.yaml | 5 ----- docs/changelog/102906.yaml | 6 ------ docs/changelog/102916.yaml | 6 ------ docs/changelog/102919.yaml | 5 ----- docs/changelog/102925.yaml | 5 ----- docs/changelog/102937.yaml | 5 ----- docs/changelog/102944.yaml | 6 ------ docs/changelog/102967.yaml | 6 ------ docs/changelog/102994.yaml | 5 ----- docs/changelog/103013.yaml | 5 ----- docs/changelog/103024.yaml | 6 ------ docs/changelog/103061.yaml | 5 ----- docs/changelog/103116.yaml | 6 ------ docs/changelog/103124.yaml | 5 ----- docs/changelog/103150.yaml | 6 ------ docs/changelog/103183.yaml | 6 ------ docs/changelog/103185.yaml | 5 ----- docs/changelog/103203.yaml | 5 ----- docs/changelog/103209.yaml | 6 ------ docs/changelog/103212.yaml | 5 ----- docs/changelog/103251.yaml | 5 ----- docs/changelog/103339.yaml | 6 ------ docs/changelog/103342.yaml | 5 ----- docs/changelog/103361.yaml | 5 ----- docs/changelog/103408.yaml | 6 ------ docs/changelog/103427.yaml | 5 ----- docs/changelog/103430.yaml | 5 ----- docs/changelog/103435.yaml | 5 ----- docs/changelog/103508.yaml | 5 ----- docs/changelog/103530.yaml | 5 ----- docs/changelog/103546.yaml | 5 ----- docs/changelog/103574.yaml | 5 ----- docs/changelog/103580.yaml | 6 ------ docs/changelog/103591.yaml | 6 ------ docs/changelog/103601.yaml | 7 ------- docs/changelog/103615.yaml | 5 ----- docs/changelog/103670.yaml | 5 ----- docs/changelog/103690.yaml | 5 ----- docs/changelog/103873.yaml | 5 ----- docs/changelog/103923.yaml | 5 ----- docs/changelog/104029.yaml | 5 ----- docs/changelog/104046.yaml | 5 ----- docs/changelog/104051.yaml | 6 ------ docs/changelog/96968.yaml | 6 ------ docs/changelog/98874.yaml | 5 ----- docs/changelog/98882.yaml | 6 ------ docs/changelog/98883.yaml | 6 ------ docs/changelog/98916.yaml | 5 ----- docs/changelog/99134.yaml | 5 ----- docs/changelog/99445.yaml | 5 ----- docs/changelog/99702.yaml | 6 ------ docs/changelog/99752.yaml | 5 ----- docs/changelog/99852.yaml | 5 ----- docs/changelog/99963.yaml | 5 ----- docs/changelog/99975.yaml | 5 ----- docs/changelog/99984.yaml | 6 ------ 210 files changed, 1171 deletions(-) delete mode 100644 docs/changelog/100031.yaml delete mode 100644 docs/changelog/100033.yaml delete mode 100644 docs/changelog/100236.yaml delete mode 100644 docs/changelog/100287.yaml delete mode 100644 docs/changelog/100316.yaml delete mode 100644 docs/changelog/100333.yaml delete mode 100644 docs/changelog/100368.yaml delete mode 100644 docs/changelog/100383.yaml delete mode 100644 docs/changelog/100392.yaml delete mode 100644 docs/changelog/100408.yaml delete mode 100644 docs/changelog/100466.yaml delete mode 100644 docs/changelog/100492.yaml delete mode 100644 docs/changelog/100519.yaml delete mode 100644 docs/changelog/100565.yaml delete mode 100644 docs/changelog/100570.yaml delete mode 100644 docs/changelog/100609.yaml delete mode 100644 
docs/changelog/100642.yaml delete mode 100644 docs/changelog/100646.yaml delete mode 100644 docs/changelog/100776.yaml delete mode 100644 docs/changelog/100826.yaml delete mode 100644 docs/changelog/100828.yaml delete mode 100644 docs/changelog/100862.yaml delete mode 100644 docs/changelog/100899.yaml delete mode 100644 docs/changelog/100921.yaml delete mode 100644 docs/changelog/100938.yaml delete mode 100644 docs/changelog/100974.yaml delete mode 100644 docs/changelog/100990.yaml delete mode 100644 docs/changelog/101024.yaml delete mode 100644 docs/changelog/101026.yaml delete mode 100644 docs/changelog/101032.yaml delete mode 100644 docs/changelog/101050.yaml delete mode 100644 docs/changelog/101055.yaml delete mode 100644 docs/changelog/101057.yaml delete mode 100644 docs/changelog/101066.yaml delete mode 100644 docs/changelog/101093.yaml delete mode 100644 docs/changelog/101126.yaml delete mode 100644 docs/changelog/101147.yaml delete mode 100644 docs/changelog/101148.yaml delete mode 100644 docs/changelog/101185.yaml delete mode 100644 docs/changelog/101202.yaml delete mode 100644 docs/changelog/101230.yaml delete mode 100644 docs/changelog/101235.yaml delete mode 100644 docs/changelog/101311.yaml delete mode 100644 docs/changelog/101333.yaml delete mode 100644 docs/changelog/101346.yaml delete mode 100644 docs/changelog/101383.yaml delete mode 100644 docs/changelog/101385.yaml delete mode 100644 docs/changelog/101390.yaml delete mode 100644 docs/changelog/101392.yaml delete mode 100644 docs/changelog/101396.yaml delete mode 100644 docs/changelog/101409.yaml delete mode 100644 docs/changelog/101423.yaml delete mode 100644 docs/changelog/101426.yaml delete mode 100644 docs/changelog/101457.yaml delete mode 100644 docs/changelog/101474.yaml delete mode 100644 docs/changelog/101488.yaml delete mode 100644 docs/changelog/101518.yaml delete mode 100644 docs/changelog/101535.yaml delete mode 100644 docs/changelog/101577.yaml delete mode 100644 docs/changelog/101585.yaml delete mode 100644 docs/changelog/101607.yaml delete mode 100644 docs/changelog/101609.yaml delete mode 100644 docs/changelog/101660.yaml delete mode 100644 docs/changelog/101682.yaml delete mode 100644 docs/changelog/101700.yaml delete mode 100644 docs/changelog/101723.yaml delete mode 100644 docs/changelog/101727.yaml delete mode 100644 docs/changelog/101753.yaml delete mode 100644 docs/changelog/101788.yaml delete mode 100644 docs/changelog/101802.yaml delete mode 100644 docs/changelog/101815.yaml delete mode 100644 docs/changelog/101826.yaml delete mode 100644 docs/changelog/101845.yaml delete mode 100644 docs/changelog/101846.yaml delete mode 100644 docs/changelog/101847.yaml delete mode 100644 docs/changelog/101859.yaml delete mode 100644 docs/changelog/101868.yaml delete mode 100644 docs/changelog/101904.yaml delete mode 100644 docs/changelog/101979.yaml delete mode 100644 docs/changelog/101989.yaml delete mode 100644 docs/changelog/102020.yaml delete mode 100644 docs/changelog/102032.yaml delete mode 100644 docs/changelog/102048.yaml delete mode 100644 docs/changelog/102051.yaml delete mode 100644 docs/changelog/102056.yaml delete mode 100644 docs/changelog/102065.yaml delete mode 100644 docs/changelog/102075.yaml delete mode 100644 docs/changelog/102089.yaml delete mode 100644 docs/changelog/102093.yaml delete mode 100644 docs/changelog/102138.yaml delete mode 100644 docs/changelog/102140.yaml delete mode 100644 docs/changelog/102165.yaml delete mode 100644 docs/changelog/102172.yaml delete mode 100644 
docs/changelog/102177.yaml delete mode 100644 docs/changelog/102183.yaml delete mode 100644 docs/changelog/102184.yaml delete mode 100644 docs/changelog/102188.yaml delete mode 100644 docs/changelog/102190.yaml delete mode 100644 docs/changelog/102192.yaml delete mode 100644 docs/changelog/102193.yaml delete mode 100644 docs/changelog/102208.yaml delete mode 100644 docs/changelog/102244.yaml delete mode 100644 docs/changelog/102245.yaml delete mode 100644 docs/changelog/102248.yaml delete mode 100644 docs/changelog/102273.yaml delete mode 100644 docs/changelog/102292.yaml delete mode 100644 docs/changelog/102317.yaml delete mode 100644 docs/changelog/102350.yaml delete mode 100644 docs/changelog/102379.yaml delete mode 100644 docs/changelog/102388.yaml delete mode 100644 docs/changelog/102391.yaml delete mode 100644 docs/changelog/102417.yaml delete mode 100644 docs/changelog/102426.yaml delete mode 100644 docs/changelog/102434.yaml delete mode 100644 docs/changelog/102447.yaml delete mode 100644 docs/changelog/102456.yaml delete mode 100644 docs/changelog/102461.yaml delete mode 100644 docs/changelog/102462.yaml delete mode 100644 docs/changelog/102472.yaml delete mode 100644 docs/changelog/102476.yaml delete mode 100644 docs/changelog/102490.yaml delete mode 100644 docs/changelog/102495.yaml delete mode 100644 docs/changelog/102510.yaml delete mode 100644 docs/changelog/102511.yaml delete mode 100644 docs/changelog/102512.yaml delete mode 100644 docs/changelog/102562.yaml delete mode 100644 docs/changelog/102570.yaml delete mode 100644 docs/changelog/102571.yaml delete mode 100644 docs/changelog/102598.yaml delete mode 100644 docs/changelog/102602.yaml delete mode 100644 docs/changelog/102612.yaml delete mode 100644 docs/changelog/102636.yaml delete mode 100644 docs/changelog/102637.yaml delete mode 100644 docs/changelog/102644.yaml delete mode 100644 docs/changelog/102673.yaml delete mode 100644 docs/changelog/102680.yaml delete mode 100644 docs/changelog/102682.yaml delete mode 100644 docs/changelog/102710.yaml delete mode 100644 docs/changelog/102713.yaml delete mode 100644 docs/changelog/102727.yaml delete mode 100644 docs/changelog/102731.yaml delete mode 100644 docs/changelog/102735.yaml delete mode 100644 docs/changelog/102740.yaml delete mode 100644 docs/changelog/102767.yaml delete mode 100644 docs/changelog/102806.yaml delete mode 100644 docs/changelog/102808.yaml delete mode 100644 docs/changelog/102810.yaml delete mode 100644 docs/changelog/102811.yaml delete mode 100644 docs/changelog/102832.yaml delete mode 100644 docs/changelog/102840.yaml delete mode 100644 docs/changelog/102844.yaml delete mode 100644 docs/changelog/102877.yaml delete mode 100644 docs/changelog/102888.yaml delete mode 100644 docs/changelog/102901.yaml delete mode 100644 docs/changelog/102902.yaml delete mode 100644 docs/changelog/102906.yaml delete mode 100644 docs/changelog/102916.yaml delete mode 100644 docs/changelog/102919.yaml delete mode 100644 docs/changelog/102925.yaml delete mode 100644 docs/changelog/102937.yaml delete mode 100644 docs/changelog/102944.yaml delete mode 100644 docs/changelog/102967.yaml delete mode 100644 docs/changelog/102994.yaml delete mode 100644 docs/changelog/103013.yaml delete mode 100644 docs/changelog/103024.yaml delete mode 100644 docs/changelog/103061.yaml delete mode 100644 docs/changelog/103116.yaml delete mode 100644 docs/changelog/103124.yaml delete mode 100644 docs/changelog/103150.yaml delete mode 100644 docs/changelog/103183.yaml delete mode 100644 
docs/changelog/103185.yaml delete mode 100644 docs/changelog/103203.yaml delete mode 100644 docs/changelog/103209.yaml delete mode 100644 docs/changelog/103212.yaml delete mode 100644 docs/changelog/103251.yaml delete mode 100644 docs/changelog/103339.yaml delete mode 100644 docs/changelog/103342.yaml delete mode 100644 docs/changelog/103361.yaml delete mode 100644 docs/changelog/103408.yaml delete mode 100644 docs/changelog/103427.yaml delete mode 100644 docs/changelog/103430.yaml delete mode 100644 docs/changelog/103435.yaml delete mode 100644 docs/changelog/103508.yaml delete mode 100644 docs/changelog/103530.yaml delete mode 100644 docs/changelog/103546.yaml delete mode 100644 docs/changelog/103574.yaml delete mode 100644 docs/changelog/103580.yaml delete mode 100644 docs/changelog/103591.yaml delete mode 100644 docs/changelog/103601.yaml delete mode 100644 docs/changelog/103615.yaml delete mode 100644 docs/changelog/103670.yaml delete mode 100644 docs/changelog/103690.yaml delete mode 100644 docs/changelog/103873.yaml delete mode 100644 docs/changelog/103923.yaml delete mode 100644 docs/changelog/104029.yaml delete mode 100644 docs/changelog/104046.yaml delete mode 100644 docs/changelog/104051.yaml delete mode 100644 docs/changelog/96968.yaml delete mode 100644 docs/changelog/98874.yaml delete mode 100644 docs/changelog/98882.yaml delete mode 100644 docs/changelog/98883.yaml delete mode 100644 docs/changelog/98916.yaml delete mode 100644 docs/changelog/99134.yaml delete mode 100644 docs/changelog/99445.yaml delete mode 100644 docs/changelog/99702.yaml delete mode 100644 docs/changelog/99752.yaml delete mode 100644 docs/changelog/99852.yaml delete mode 100644 docs/changelog/99963.yaml delete mode 100644 docs/changelog/99975.yaml delete mode 100644 docs/changelog/99984.yaml diff --git a/docs/changelog/100031.yaml b/docs/changelog/100031.yaml deleted file mode 100644 index 32aa51d2f9de6..0000000000000 --- a/docs/changelog/100031.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100031 -summary: Add executed pipelines to bulk api response -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/100033.yaml b/docs/changelog/100033.yaml deleted file mode 100644 index 92ef6cd289fdc..0000000000000 --- a/docs/changelog/100033.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 100033 -summary: "[Behavioral Analytics] Analytics collections use Data Stream Lifecycle (DSL)\ - \ instead of Index Lifecycle Management (ILM) for data retention management. Behavioral\ - \ analytics has traditionally used ILM to manage data retention. Starting with 8.12.0,\ - \ this will change. Analytics collections created prior to 8.12.0 will continue to use\ - \ their existing ILM policies, but new analytics collections will be managed using DSL." 
-area: Application -type: feature -issues: [ ] diff --git a/docs/changelog/100236.yaml b/docs/changelog/100236.yaml deleted file mode 100644 index b33825f9bc553..0000000000000 --- a/docs/changelog/100236.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100236 -summary: Record operation purpose for s3 stats collection -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/100287.yaml b/docs/changelog/100287.yaml deleted file mode 100644 index b92855a3342e2..0000000000000 --- a/docs/changelog/100287.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100287 -summary: Add an assertion to the testTransformFeatureReset test case -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100316.yaml b/docs/changelog/100316.yaml deleted file mode 100644 index 9efb64a332dc1..0000000000000 --- a/docs/changelog/100316.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100316 -summary: Parallelize stale index deletion -area: Snapshot/Restore -type: enhancement -issues: - - 61513 diff --git a/docs/changelog/100333.yaml b/docs/changelog/100333.yaml deleted file mode 100644 index 96a2a62deffe5..0000000000000 --- a/docs/changelog/100333.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100333 -summary: Enable Universal Profiling as Enterprise feature -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/100368.yaml b/docs/changelog/100368.yaml deleted file mode 100644 index 2b9d8dc0b2044..0000000000000 --- a/docs/changelog/100368.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100368 -summary: "Status codes for Aggregation errors, part 2" -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/100383.yaml b/docs/changelog/100383.yaml deleted file mode 100644 index 6cda66149b2cc..0000000000000 --- a/docs/changelog/100383.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100383 -summary: Push s3 requests count via metrics API -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/100392.yaml b/docs/changelog/100392.yaml deleted file mode 100644 index ab693d5ae04ce..0000000000000 --- a/docs/changelog/100392.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100392 -summary: Prevent resource over-subscription in model allocation planner -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100408.yaml b/docs/changelog/100408.yaml deleted file mode 100644 index 275c3b4a0de48..0000000000000 --- a/docs/changelog/100408.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100408 -summary: "ESQL: Make blocks ref counted" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/100466.yaml b/docs/changelog/100466.yaml deleted file mode 100644 index aaa30876ddfdf..0000000000000 --- a/docs/changelog/100466.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100466 -summary: "Introduce includeShardsStats in the stats request to indicate that we only fetch a summary" -area: Stats -type: enhancement -issues: [99744] diff --git a/docs/changelog/100492.yaml b/docs/changelog/100492.yaml deleted file mode 100644 index e0a1020b49488..0000000000000 --- a/docs/changelog/100492.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100492 -summary: Add runtime field of type `geo_shape` -area: Geo -type: enhancement -issues: - - 61299 diff --git a/docs/changelog/100519.yaml b/docs/changelog/100519.yaml deleted file mode 100644 index 086c6962b3a95..0000000000000 --- a/docs/changelog/100519.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100519 -summary: Disallow vectors whose magnitudes will not fit in a float -area: Vector Search -type: bug -issues: [] diff --git a/docs/changelog/100565.yaml 
b/docs/changelog/100565.yaml deleted file mode 100644 index 066e9bbb4b227..0000000000000 --- a/docs/changelog/100565.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100565 -summary: "[Monitoring] Dont get cluster state until recovery" -area: Monitoring -type: bug -issues: [] diff --git a/docs/changelog/100570.yaml b/docs/changelog/100570.yaml deleted file mode 100644 index b68a905b0e046..0000000000000 --- a/docs/changelog/100570.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100570 -summary: Added metric for cache eviction of entries with non zero frequency -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/100609.yaml b/docs/changelog/100609.yaml deleted file mode 100644 index c1c63c1af5d4d..0000000000000 --- a/docs/changelog/100609.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100609 -summary: Fix metric gauge creation model -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/100642.yaml b/docs/changelog/100642.yaml deleted file mode 100644 index 805a20174e11d..0000000000000 --- a/docs/changelog/100642.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100642 -summary: "ESQL: Alias duplicated aggregations in a stats" -area: ES|QL -type: enhancement -issues: - - 100544 diff --git a/docs/changelog/100646.yaml b/docs/changelog/100646.yaml deleted file mode 100644 index 63958ff18c4df..0000000000000 --- a/docs/changelog/100646.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100646 -summary: Support complex datemath expressions in index and index alias names -area: Search -type: bug -issues: [] diff --git a/docs/changelog/100776.yaml b/docs/changelog/100776.yaml deleted file mode 100644 index a0bde13f47c92..0000000000000 --- a/docs/changelog/100776.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100776 -summary: Health Report API should not return RED for unassigned cold/frozen shards - when data is available -area: ILM+SLM -type: enhancement -issues: [] diff --git a/docs/changelog/100826.yaml b/docs/changelog/100826.yaml deleted file mode 100644 index 1b1729d1491ea..0000000000000 --- a/docs/changelog/100826.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100826 -summary: Fix geo tile bounding boxes to be consistent with arithmetic method -area: Geo -type: bug -issues: - - 92611 - - 95574 diff --git a/docs/changelog/100828.yaml b/docs/changelog/100828.yaml deleted file mode 100644 index 6271a1cf2a0a9..0000000000000 --- a/docs/changelog/100828.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100828 -summary: Consider task cancelled exceptions as recoverable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100862.yaml b/docs/changelog/100862.yaml deleted file mode 100644 index ce9f119203d9d..0000000000000 --- a/docs/changelog/100862.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100862 -summary: Sending an index name to `DocumentParsingObserver` that is not ever null -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/100899.yaml b/docs/changelog/100899.yaml deleted file mode 100644 index 988546bb22cbe..0000000000000 --- a/docs/changelog/100899.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100899 -summary: Add methods for adding generation listeners with primary term -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/100921.yaml b/docs/changelog/100921.yaml deleted file mode 100644 index e6e2caa93d465..0000000000000 --- a/docs/changelog/100921.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100921 -summary: "Add support for Serbian Language Analyzer" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/100938.yaml b/docs/changelog/100938.yaml deleted file mode 
100644 index b21f6955c992e..0000000000000 --- a/docs/changelog/100938.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100938 -summary: "Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics" -area: Stats -type: enhancement -issues: [] diff --git a/docs/changelog/100974.yaml b/docs/changelog/100974.yaml deleted file mode 100644 index e5d3a4ad3c9df..0000000000000 --- a/docs/changelog/100974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100974 -summary: Create new cluster state API for querying features present on a cluster -area: "Infra/Core" -type: feature -issues: [] diff --git a/docs/changelog/100990.yaml b/docs/changelog/100990.yaml deleted file mode 100644 index 21b6fb93655cc..0000000000000 --- a/docs/changelog/100990.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100990 -summary: Add status code to `rest.suppressed` log output -area: "Infra/Logging" -type: enhancement -issues: [] diff --git a/docs/changelog/101024.yaml b/docs/changelog/101024.yaml deleted file mode 100644 index edbd3d834526c..0000000000000 --- a/docs/changelog/101024.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101024 -summary: More consistent logging messages for snapshot deletion -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101026.yaml b/docs/changelog/101026.yaml deleted file mode 100644 index cee85a722d7fa..0000000000000 --- a/docs/changelog/101026.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101026 -summary: Remove `auto_configure` privilege for profiling -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/101032.yaml b/docs/changelog/101032.yaml deleted file mode 100644 index 1c69e372704ce..0000000000000 --- a/docs/changelog/101032.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101032 -summary: Throw when wrapping rate agg in `DeferableBucketAggregator` -area: TSDB -type: bug -issues: [] diff --git a/docs/changelog/101050.yaml b/docs/changelog/101050.yaml deleted file mode 100644 index 1a68466e6e728..0000000000000 --- a/docs/changelog/101050.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101050 -summary: Ensure the correct `threadContext` for `RemoteClusterNodesAction` -area: Network -type: bug -issues: [] diff --git a/docs/changelog/101055.yaml b/docs/changelog/101055.yaml deleted file mode 100644 index e4ca4548c2ef6..0000000000000 --- a/docs/changelog/101055.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101055 -summary: Make tasks that calculate checkpoints time out -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/101057.yaml b/docs/changelog/101057.yaml deleted file mode 100644 index 2024c714f58b0..0000000000000 --- a/docs/changelog/101057.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101057 -summary: Add error logging for *QL -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/101066.yaml b/docs/changelog/101066.yaml deleted file mode 100644 index 2fac601d65674..0000000000000 --- a/docs/changelog/101066.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101066 -summary: Log errors in `RestResponse` regardless of `error_trace` parameter -area: "Infra/Core" -type: enhancement -issues: - - 100884 diff --git a/docs/changelog/101093.yaml b/docs/changelog/101093.yaml deleted file mode 100644 index 99765170dd257..0000000000000 --- a/docs/changelog/101093.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101093 -summary: Make IPAddress writeable -area: Infra/Scripting -type: bug -issues: - - 101082 diff --git a/docs/changelog/101126.yaml b/docs/changelog/101126.yaml deleted file mode 100644 index 7a0f45891b171..0000000000000 --- 
a/docs/changelog/101126.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101126 -summary: Include totals in flamegraph response -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/101147.yaml b/docs/changelog/101147.yaml deleted file mode 100644 index cb556af35eead..0000000000000 --- a/docs/changelog/101147.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101147 -summary: Persist data counts on job close before results index refresh -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/101148.yaml b/docs/changelog/101148.yaml deleted file mode 100644 index eabe288e69e88..0000000000000 --- a/docs/changelog/101148.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101148 -summary: Add support for marking component templates as deprecated -area: Indices APIs -type: enhancement -issues: - - 100992 diff --git a/docs/changelog/101185.yaml b/docs/changelog/101185.yaml deleted file mode 100644 index 63d3a4da328b1..0000000000000 --- a/docs/changelog/101185.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101185 -summary: Repo analysis of uncontended register behaviour -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/101202.yaml b/docs/changelog/101202.yaml deleted file mode 100644 index 565338a2dbb6e..0000000000000 --- a/docs/changelog/101202.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101202 -summary: Optimize `MurmurHash3` -area: "Ingest Node" -type: enhancement -issues: [] diff --git a/docs/changelog/101230.yaml b/docs/changelog/101230.yaml deleted file mode 100644 index 3ed7eacb3fce0..0000000000000 --- a/docs/changelog/101230.yaml +++ /dev/null @@ -1,12 +0,0 @@ -pr: 101230 -summary: Enable query phase parallelism within a single shard -area: Search -type: enhancement -issues: - - 80693 -highlight: - title: Enable query phase parallelism within a single shard - body: |- - Activate inter-segment search concurrency by default in the query phase, in order to - enable parallelizing search execution across segments that a single shard is made of. - notable: true diff --git a/docs/changelog/101235.yaml b/docs/changelog/101235.yaml deleted file mode 100644 index 53adf9527c2c4..0000000000000 --- a/docs/changelog/101235.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101235 -summary: Load different way -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101311.yaml b/docs/changelog/101311.yaml deleted file mode 100644 index e4786b937e060..0000000000000 --- a/docs/changelog/101311.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101311 -summary: Cache resolved index for mgets -area: CRUD -type: enhancement -issues: [] diff --git a/docs/changelog/101333.yaml b/docs/changelog/101333.yaml deleted file mode 100644 index 4452687b995d3..0000000000000 --- a/docs/changelog/101333.yaml +++ /dev/null @@ -1,29 +0,0 @@ -pr: 101333 -summary: Fixed JWT principal from claims -area: Authorization -type: breaking -issues: [] -breaking: - title: Fixed JWT principal from claims - area: Authorization - details: "This changes the format of a JWT's principal before the JWT is actually\ - \ validated by any JWT realm. The JWT's principal is a convenient way to refer\ - \ to a JWT that has not yet been verified by a JWT realm. The JWT's principal\ - \ is printed in the audit and regular logs (notably for auditing authn failures)\ - \ as well as the smart realm chain reordering optimization. The JWT principal\ - \ is NOT required to be identical to the JWT-authenticated user's principal, but\ - \ in general, they should be similar. 
Previously, the JWT's principal was built\ - \ by individual realms in the same way the realms built the authenticated user's\ - \ principal. This had the advantage that, in simpler JWT realms configurations\ - \ (e.g. a single JWT realm in the chain), the JWT principal and the authenticated\ - \ user's principal are very similar. However the drawback is that, in general,\ - \ the JWT principal and the user principal can be very different (i.e. in the\ - \ case where one JWT realm builds the JWT principal and a different one builds\ - \ the user principal). Another downside is that the (unauthenticated) JWT principal\ - \ depended on realm ordering, which makes identifying the JWT from its principal\ - \ dependent on the ES authn realm configuration. This PR implements a consistent\ - \ fixed logic to build the JWT principal, which only depends on the JWT's claims\ - \ and no ES configuration." - impact: "Users will observe changed format and values for the `user.name` attribute\ - \ of `authentication_failed` audit log events, in the JWT (failed) authn case." - notable: false diff --git a/docs/changelog/101346.yaml b/docs/changelog/101346.yaml deleted file mode 100644 index b32b123c506d1..0000000000000 --- a/docs/changelog/101346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101346 -summary: Report full stack trace for non-state file settings transforms -area: Infra/Settings -type: bug -issues: [] diff --git a/docs/changelog/101383.yaml b/docs/changelog/101383.yaml deleted file mode 100644 index 4875403acfaeb..0000000000000 --- a/docs/changelog/101383.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101383 -summary: "ESQL: Track memory from values loaded from lucene" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101385.yaml b/docs/changelog/101385.yaml deleted file mode 100644 index 406ed804cbbcc..0000000000000 --- a/docs/changelog/101385.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101385 -summary: "ESQL: Fix planning of MV_EXPAND with foldable expressions" -area: ES|QL -type: bug -issues: - - 101118 diff --git a/docs/changelog/101390.yaml b/docs/changelog/101390.yaml deleted file mode 100644 index 23bdef6e39dfe..0000000000000 --- a/docs/changelog/101390.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101390 -summary: Enable inter-segment concurrency for terms aggs -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/101392.yaml b/docs/changelog/101392.yaml deleted file mode 100644 index af79917245726..0000000000000 --- a/docs/changelog/101392.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101392 -summary: Include ML processor limits in `_ml/info` response -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/101396.yaml b/docs/changelog/101396.yaml deleted file mode 100644 index a486b2bed9237..0000000000000 --- a/docs/changelog/101396.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101396 -summary: "ESQL: Track blocks emitted from lucene" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101409.yaml b/docs/changelog/101409.yaml deleted file mode 100644 index 82e7f339fdd89..0000000000000 --- a/docs/changelog/101409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101409 -summary: Adding a simulate ingest api -area: Ingest Node -type: feature -issues: [] diff --git a/docs/changelog/101423.yaml b/docs/changelog/101423.yaml deleted file mode 100644 index a5497d444797f..0000000000000 --- a/docs/changelog/101423.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101423 -summary: Export circuit breaker trip count as a counter metric -area: Aggregations 
-type: enhancement -issues: [] diff --git a/docs/changelog/101426.yaml b/docs/changelog/101426.yaml deleted file mode 100644 index f9053ba1c1ec1..0000000000000 --- a/docs/changelog/101426.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101426 -summary: Add undesired shard count -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/101457.yaml b/docs/changelog/101457.yaml deleted file mode 100644 index 03bdbe39b5b8e..0000000000000 --- a/docs/changelog/101457.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 101457 -summary: "Remove Plugin.createComponents method in favour of overload with a PluginServices object" -area: Infra/Plugins -type: breaking-java -breaking: - area: "Java API" - title: "Plugin.createComponents method has been refactored to take a single PluginServices object" - details: > - Plugin.createComponents currently takes several different service arguments. The signature of this method changes - every time a new service is added. The method has now been modified to take a single interface object - that new services are added to. This will reduce API incompatibility issues when a new service - is introduced in the future. - impact: "Plugins that override createComponents will need to be refactored to override the new method on ES 8.12+" - notable: false diff --git a/docs/changelog/101474.yaml b/docs/changelog/101474.yaml deleted file mode 100644 index 2c013fe5d2537..0000000000000 --- a/docs/changelog/101474.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101474 -summary: "[Search Applications] Return 400 response when template rendering produces invalid JSON" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101488.yaml b/docs/changelog/101488.yaml deleted file mode 100644 index 1db48a63f8542..0000000000000 --- a/docs/changelog/101488.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101488 -summary: "ESQL: More tracking in `BlockHash` impls" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/101518.yaml b/docs/changelog/101518.yaml deleted file mode 100644 index 53db542640348..0000000000000 --- a/docs/changelog/101518.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101518 -summary: Check that scripts produce correct json in render template action -area: Search -type: bug -issues: - - 101477 diff --git a/docs/changelog/101535.yaml b/docs/changelog/101535.yaml deleted file mode 100644 index 79ed78fa1d7a1..0000000000000 --- a/docs/changelog/101535.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101535 -summary: Disable inter-segment concurrency when sorting by field -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/101577.yaml b/docs/changelog/101577.yaml deleted file mode 100644 index e485fd3811cb6..0000000000000 --- a/docs/changelog/101577.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101577 -summary: Add metrics to the shared blob cache -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/101585.yaml b/docs/changelog/101585.yaml deleted file mode 100644 index 71815df1f48d9..0000000000000 --- a/docs/changelog/101585.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101585 -summary: Reroute on shard snapshot completion -area: Snapshot/Restore -type: bug -issues: - - 101514 diff --git a/docs/changelog/101607.yaml b/docs/changelog/101607.yaml deleted file mode 100644 index 18ee7f1bdc5cc..0000000000000 --- a/docs/changelog/101607.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101607 -summary: Log stacktrace together with log message in order to help debugging -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/101609.yaml 
b/docs/changelog/101609.yaml deleted file mode 100644 index 27993574743d2..0000000000000 --- a/docs/changelog/101609.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 101609 -summary: > - Add a node feature join barrier. This prevents nodes from joining clusters that do not have - all the features already present in the cluster. This ensures that once a features is supported - by all the nodes in a cluster, that feature will never then not be supported in the future. - This is the corresponding functionality for the version join barrier, but for features -area: "Cluster Coordination" -type: feature -issues: [] diff --git a/docs/changelog/101660.yaml b/docs/changelog/101660.yaml deleted file mode 100644 index cb3d3118d15a6..0000000000000 --- a/docs/changelog/101660.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101660 -summary: Fall through malformed JWTs to subsequent realms in the chain -area: Authentication -type: bug -issues: - - 101367 diff --git a/docs/changelog/101682.yaml b/docs/changelog/101682.yaml deleted file mode 100644 index e512006057581..0000000000000 --- a/docs/changelog/101682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101682 -summary: "Add manage_enrich cluster privilege to kibana_system role" -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/101700.yaml b/docs/changelog/101700.yaml deleted file mode 100644 index 08671360688a7..0000000000000 --- a/docs/changelog/101700.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101700 -summary: Fix `lastUnsafeSegmentGenerationForGets` for realtime get -area: Engine -type: bug -issues: [] diff --git a/docs/changelog/101723.yaml b/docs/changelog/101723.yaml deleted file mode 100644 index 146d164805f00..0000000000000 --- a/docs/changelog/101723.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101723 -summary: Allowing non-dynamic index settings to be updated by automatically unassigning - shards -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101727.yaml b/docs/changelog/101727.yaml deleted file mode 100644 index 24a7e1d5b4e48..0000000000000 --- a/docs/changelog/101727.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101727 -summary: Fix listeners in `SharedBlobCacheService.readMultiRegions` -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/101753.yaml b/docs/changelog/101753.yaml deleted file mode 100644 index 7b64075998430..0000000000000 --- a/docs/changelog/101753.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101753 -summary: Expose roles by default in cat allocation API -area: CAT APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101788.yaml b/docs/changelog/101788.yaml deleted file mode 100644 index b7cc1e20663e8..0000000000000 --- a/docs/changelog/101788.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101788 -summary: "ESQL: Narrow catch in convert functions" -area: ES|QL -type: bug -issues: - - 100820 diff --git a/docs/changelog/101802.yaml b/docs/changelog/101802.yaml deleted file mode 100644 index 20e857c32f664..0000000000000 --- a/docs/changelog/101802.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101802 -summary: Correctly logging watcher history write failures -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/101815.yaml b/docs/changelog/101815.yaml deleted file mode 100644 index 511e23beb68ef..0000000000000 --- a/docs/changelog/101815.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101815 -summary: Run `TransportGetAliasesAction` on local node -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/101826.yaml b/docs/changelog/101826.yaml deleted file mode 100644 
index 87f3f8df1b0c2..0000000000000 --- a/docs/changelog/101826.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101826 -summary: Support keyed histograms -area: Aggregations -type: enhancement -issues: - - 100242 diff --git a/docs/changelog/101845.yaml b/docs/changelog/101845.yaml deleted file mode 100644 index 0dd95bdabca57..0000000000000 --- a/docs/changelog/101845.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101845 -summary: Introduce new endpoint to expose data stream lifecycle stats -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml deleted file mode 100644 index 52dfff8801c62..0000000000000 --- a/docs/changelog/101846.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101846 -summary: Set `ActiveProcessorCount` when `node.processors` is set -area: Infra/CLI -type: enhancement -issues: [] diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml deleted file mode 100644 index 91922b9e23ed0..0000000000000 --- a/docs/changelog/101847.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101847 -summary: Add an additional tiebreaker to RRF -area: Ranking -type: bug -issues: - - 101232 diff --git a/docs/changelog/101859.yaml b/docs/changelog/101859.yaml deleted file mode 100644 index 54f3fb12810ca..0000000000000 --- a/docs/changelog/101859.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101859 -summary: Cover head/tail commands edge cases and data types coverage -area: EQL -type: bug -issues: - - 101724 diff --git a/docs/changelog/101868.yaml b/docs/changelog/101868.yaml deleted file mode 100644 index d7cf650d25ed2..0000000000000 --- a/docs/changelog/101868.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101868 -summary: Read scores from downloaded vocabulary for XLM Roberta tokenizers -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/101904.yaml b/docs/changelog/101904.yaml deleted file mode 100644 index cad422cc52e15..0000000000000 --- a/docs/changelog/101904.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101904 -summary: Allow granting API keys with JWT as the access_token -area: Security -type: feature -issues: [] diff --git a/docs/changelog/101979.yaml b/docs/changelog/101979.yaml deleted file mode 100644 index ad119df24d36f..0000000000000 --- a/docs/changelog/101979.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101979 -summary: Calculate CO2 and emmission and costs -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/101989.yaml b/docs/changelog/101989.yaml deleted file mode 100644 index d294d194bd4e8..0000000000000 --- a/docs/changelog/101989.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101989 -summary: Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/102020.yaml b/docs/changelog/102020.yaml deleted file mode 100644 index 7c74e9676d342..0000000000000 --- a/docs/changelog/102020.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102020 -summary: Retrieve stacktrace events from a custom index -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102032.yaml b/docs/changelog/102032.yaml deleted file mode 100644 index 40463b9f252b9..0000000000000 --- a/docs/changelog/102032.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102032 -summary: Add vector_operation_count in profile output for knn searches -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/102048.yaml b/docs/changelog/102048.yaml deleted file mode 100644 index 54bc1d9eae52e..0000000000000 --- a/docs/changelog/102048.yaml +++ /dev/null @@ 
-1,5 +0,0 @@ -pr: 102048 -summary: "Repo analysis: verify empty register" -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102051.yaml b/docs/changelog/102051.yaml deleted file mode 100644 index c3ca4a546928f..0000000000000 --- a/docs/changelog/102051.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102051 -summary: "Repo analysis: allow configuration of register ops" -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102056.yaml b/docs/changelog/102056.yaml deleted file mode 100644 index 455f66ba90b03..0000000000000 --- a/docs/changelog/102056.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102056 -summary: Use `BulkRequest` to store Application Privileges -area: Authorization -type: enhancement -issues: [] diff --git a/docs/changelog/102065.yaml b/docs/changelog/102065.yaml deleted file mode 100644 index 1a9a219df4502..0000000000000 --- a/docs/changelog/102065.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102065 -summary: Add more desired balance stats -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/102075.yaml b/docs/changelog/102075.yaml deleted file mode 100644 index 54daae04169db..0000000000000 --- a/docs/changelog/102075.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102075 -summary: Accept a single or multiple inputs to `_inference` -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102089.yaml b/docs/changelog/102089.yaml deleted file mode 100644 index 9f33c0648d09f..0000000000000 --- a/docs/changelog/102089.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102089 -summary: Add prefix strings option to trained models -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102093.yaml b/docs/changelog/102093.yaml deleted file mode 100644 index f6922c0d36be6..0000000000000 --- a/docs/changelog/102093.yaml +++ /dev/null @@ -1,14 +0,0 @@ -pr: 102093 -summary: Add byte quantization for float vectors in HNSW -area: Vector Search -type: feature -issues: [] -highlight: - title: Add new `int8_hsnw` index type for int8 quantization for HNSW - body: |- - This commit adds a new index type called `int8_hnsw`. This index will - automatically quantized float32 values into int8 byte values. While - this increases disk usage by 25%, it reduces memory required for - fast HNSW search by 75%. Dramatically reducing the resource overhead - required for dense vector search. 
- notable: true diff --git a/docs/changelog/102138.yaml b/docs/changelog/102138.yaml deleted file mode 100644 index 3819e3201150e..0000000000000 --- a/docs/changelog/102138.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102138 -summary: Skip shards that don't match the source query during checkpointing -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/102140.yaml b/docs/changelog/102140.yaml deleted file mode 100644 index 0f086649b9710..0000000000000 --- a/docs/changelog/102140.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102140 -summary: Collect data tiers usage stats more efficiently -area: ILM+SLM -type: bug -issues: - - 100230 \ No newline at end of file diff --git a/docs/changelog/102165.yaml b/docs/changelog/102165.yaml deleted file mode 100644 index e1c4c76f1f6ff..0000000000000 --- a/docs/changelog/102165.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102165 -summary: Fix planning of duplicate aggs -area: ES|QL -type: bug -issues: - - 102083 diff --git a/docs/changelog/102172.yaml b/docs/changelog/102172.yaml deleted file mode 100644 index 485c2c4327e11..0000000000000 --- a/docs/changelog/102172.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102172 -summary: Adjust Histogram's bucket accounting to be iteratively -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102177.yaml b/docs/changelog/102177.yaml deleted file mode 100644 index 62d7b11b86513..0000000000000 --- a/docs/changelog/102177.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102177 -summary: "GEO_POINT and CARTESIAN_POINT type support" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/102183.yaml b/docs/changelog/102183.yaml deleted file mode 100644 index 3daa1418ba5d0..0000000000000 --- a/docs/changelog/102183.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 102183 -summary: "[ES|QL] pow function always returns double" -area: ES|QL -type: "breaking" -issues: - - 99055 -breaking: - title: "[ES|QL] pow function always returns double" - area: REST API - details: "In ES|QL, the pow function no longer returns the type of its inputs, instead\ - \ always returning a double." - impact: low. Most queries should continue to function with the change. 
- notable: false diff --git a/docs/changelog/102184.yaml b/docs/changelog/102184.yaml deleted file mode 100644 index ba4d045b6b0aa..0000000000000 --- a/docs/changelog/102184.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102184 -summary: Track ESQL enrich memory -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102188.yaml b/docs/changelog/102188.yaml deleted file mode 100644 index 595a8395fab5c..0000000000000 --- a/docs/changelog/102188.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102188 -summary: Track blocks in `AsyncOperator` -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102190.yaml b/docs/changelog/102190.yaml deleted file mode 100644 index cd04e041fca5e..0000000000000 --- a/docs/changelog/102190.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102190 -summary: Track pages in ESQL enrich request/response -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102192.yaml b/docs/changelog/102192.yaml deleted file mode 100644 index 531aa943c9e36..0000000000000 --- a/docs/changelog/102192.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102192 -summary: "ESQL: Load more than one field at once" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102193.yaml b/docs/changelog/102193.yaml deleted file mode 100644 index 4d64493602ff2..0000000000000 --- a/docs/changelog/102193.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102193 -summary: Fix cache invalidation on privilege modification -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/102208.yaml b/docs/changelog/102208.yaml deleted file mode 100644 index b566a85753d82..0000000000000 --- a/docs/changelog/102208.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102208 -summary: Add static node settings to set default values for max merged segment sizes -area: Engine -type: enhancement -issues: [] diff --git a/docs/changelog/102244.yaml b/docs/changelog/102244.yaml deleted file mode 100644 index 3b160e033b57e..0000000000000 --- a/docs/changelog/102244.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102244 -summary: Expose reconciliation metrics via APM -area: Allocation -type: enhancement -issues: [] diff --git a/docs/changelog/102245.yaml b/docs/changelog/102245.yaml deleted file mode 100644 index 387540d96290c..0000000000000 --- a/docs/changelog/102245.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102245 -summary: Add non-green indicator names to `HealthPeriodicLogger` message -area: Health -type: enhancement -issues: [] diff --git a/docs/changelog/102248.yaml b/docs/changelog/102248.yaml deleted file mode 100644 index 854e8afde4086..0000000000000 --- a/docs/changelog/102248.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102248 -summary: Node stats as metrics -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102273.yaml b/docs/changelog/102273.yaml deleted file mode 100644 index 78ecc8b2d2734..0000000000000 --- a/docs/changelog/102273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102273 -summary: Improve analyzer reload log message -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/102292.yaml b/docs/changelog/102292.yaml deleted file mode 100644 index 953c3ffdf6150..0000000000000 --- a/docs/changelog/102292.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102292 -summary: Consider duplicate stacktraces in custom index -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102317.yaml b/docs/changelog/102317.yaml deleted file mode 100644 index 89b2ae5432101..0000000000000 --- a/docs/changelog/102317.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102317 
-summary: "ESQL: Fix single value query" -area: ES|QL -type: bug -issues: - - 102298 diff --git a/docs/changelog/102350.yaml b/docs/changelog/102350.yaml deleted file mode 100644 index 00a311c5d99f8..0000000000000 --- a/docs/changelog/102350.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102350 -summary: "ESQL: Fix rare bug with empty string" -area: ES|QL -type: bug -issues: - - 101969 diff --git a/docs/changelog/102379.yaml b/docs/changelog/102379.yaml deleted file mode 100644 index 0773b137779a5..0000000000000 --- a/docs/changelog/102379.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102379 -summary: Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better - performance -area: Transform -type: enhancement -issues: [] diff --git a/docs/changelog/102388.yaml b/docs/changelog/102388.yaml deleted file mode 100644 index 3e65e46949bda..0000000000000 --- a/docs/changelog/102388.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102388 -summary: Add support for `index_filter` to open pit -area: Search -type: enhancement -issues: - - 99740 diff --git a/docs/changelog/102391.yaml b/docs/changelog/102391.yaml deleted file mode 100644 index 5fcbb9e6d2858..0000000000000 --- a/docs/changelog/102391.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102391 -summary: "ESQL: Support the `_source` metadata field" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102417.yaml b/docs/changelog/102417.yaml deleted file mode 100644 index 09c1a4f49dbfd..0000000000000 --- a/docs/changelog/102417.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102417 -summary: "ESQL: emit warnings from single-value functions processing multi-values" -area: ES|QL -type: feature -issues: - - 98743 diff --git a/docs/changelog/102426.yaml b/docs/changelog/102426.yaml deleted file mode 100644 index 3aad50ed1eee0..0000000000000 --- a/docs/changelog/102426.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102426 -summary: Patterns support for allowed subjects by the JWT realm -area: Authentication -type: feature -issues: [] diff --git a/docs/changelog/102434.yaml b/docs/changelog/102434.yaml deleted file mode 100644 index ab6aa886c13b1..0000000000000 --- a/docs/changelog/102434.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102434 -summary: "ESQL: Short circuit loading empty doc values" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102447.yaml b/docs/changelog/102447.yaml deleted file mode 100644 index 76823153670bd..0000000000000 --- a/docs/changelog/102447.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102447 -summary: Pass transform source query as `index_filter` to `open_point_in_time` request -area: Transform -type: enhancement -issues: - - 101049 diff --git a/docs/changelog/102456.yaml b/docs/changelog/102456.yaml deleted file mode 100644 index 6ef3b8f16f53c..0000000000000 --- a/docs/changelog/102456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102456 -summary: Switch logs data streams to search all fields by default -area: Data streams -type: enhancement -issues: - - 99872 diff --git a/docs/changelog/102461.yaml b/docs/changelog/102461.yaml deleted file mode 100644 index c0c07554ed21f..0000000000000 --- a/docs/changelog/102461.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102461 -summary: Enable concurrency for scripted metric agg -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102462.yaml b/docs/changelog/102462.yaml deleted file mode 100644 index d44ccc4cbbc5c..0000000000000 --- a/docs/changelog/102462.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102462 -summary: Check the real 
memory circuit breaker when building global ordinals -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102472.yaml b/docs/changelog/102472.yaml deleted file mode 100644 index b0f5bfc714643..0000000000000 --- a/docs/changelog/102472.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102472 -summary: Expose the `invalidation` field in Get/Query `ApiKey` APIs -area: Security -type: enhancement -issues: [ ] diff --git a/docs/changelog/102476.yaml b/docs/changelog/102476.yaml deleted file mode 100644 index a53a20ecfec20..0000000000000 --- a/docs/changelog/102476.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102476 -summary: Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102490.yaml b/docs/changelog/102490.yaml deleted file mode 100644 index 8ff554ab0f0fe..0000000000000 --- a/docs/changelog/102490.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102490 -summary: "ESQL: Load text field from parent keyword field" -area: ES|QL -type: enhancement -issues: - - 102473 diff --git a/docs/changelog/102495.yaml b/docs/changelog/102495.yaml deleted file mode 100644 index 77ae42f7eebcb..0000000000000 --- a/docs/changelog/102495.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102495 -summary: "Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin" -area: Distributed -type: enhancement -issues: - - 101873 diff --git a/docs/changelog/102510.yaml b/docs/changelog/102510.yaml deleted file mode 100644 index 2b654b5c85929..0000000000000 --- a/docs/changelog/102510.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 102510 -summary: "ESQL: Make fieldcaps calls lighter" -area: ES|QL -type: enhancement -issues: - - 101763 - - 102393 diff --git a/docs/changelog/102511.yaml b/docs/changelog/102511.yaml deleted file mode 100644 index cf80ca03e197f..0000000000000 --- a/docs/changelog/102511.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102511 -summary: Trigger parent circuit breaker when building scorers in filters aggregation -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102512.yaml b/docs/changelog/102512.yaml deleted file mode 100644 index d4bc765ecaf5f..0000000000000 --- a/docs/changelog/102512.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102512 -summary: Implement exponential backoff for transform state persistence retrying -area: Transform -type: enhancement -issues: - - 102528 diff --git a/docs/changelog/102562.yaml b/docs/changelog/102562.yaml deleted file mode 100644 index a4b4f5a095118..0000000000000 --- a/docs/changelog/102562.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102562 -summary: Track blocks of intermediate state of aggs -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102570.yaml b/docs/changelog/102570.yaml deleted file mode 100644 index 2d3f878dbbb27..0000000000000 --- a/docs/changelog/102570.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102570 -summary: Added `beat.stats.libbeat.pipeline.queue.max_events` -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/102571.yaml b/docs/changelog/102571.yaml deleted file mode 100644 index 25272408161db..0000000000000 --- a/docs/changelog/102571.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102571 -summary: Allow executing multiple periodic flushes while they are being made durable -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/102598.yaml b/docs/changelog/102598.yaml deleted file mode 100644 index c32519acdf6d1..0000000000000 --- a/docs/changelog/102598.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -pr: 102598 -summary: Add apm api for asynchronous counters (always increasing) -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/102602.yaml b/docs/changelog/102602.yaml deleted file mode 100644 index dd01eaa98b214..0000000000000 --- a/docs/changelog/102602.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102602 -summary: Consider search context missing exceptions as recoverable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/102612.yaml b/docs/changelog/102612.yaml deleted file mode 100644 index 60808ae72801a..0000000000000 --- a/docs/changelog/102612.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102612 -summary: Track blocks when hashing single multi-valued field -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102636.yaml b/docs/changelog/102636.yaml deleted file mode 100644 index 8b32e0568b0fb..0000000000000 --- a/docs/changelog/102636.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102636 -summary: Revert non-semantic `NodeInfo` -area: Infra/Core -type: regression -issues: [] diff --git a/docs/changelog/102637.yaml b/docs/changelog/102637.yaml deleted file mode 100644 index 4d5d689934bd6..0000000000000 --- a/docs/changelog/102637.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102637 -summary: Improve stability of spike and dip detection for the change point aggregation -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102644.yaml b/docs/changelog/102644.yaml deleted file mode 100644 index 17c5cbebed7cc..0000000000000 --- a/docs/changelog/102644.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102644 -summary: Disable parallelism for composite agg against high cardinality fields -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102673.yaml b/docs/changelog/102673.yaml deleted file mode 100644 index 16546edb3cf3c..0000000000000 --- a/docs/changelog/102673.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102673 -summary: "ESQL: Share constant null Blocks" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102680.yaml b/docs/changelog/102680.yaml deleted file mode 100644 index 8b32c5029ea2a..0000000000000 --- a/docs/changelog/102680.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102680 -summary: Make `api_key.delete.interval` a dynamic setting -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/102682.yaml b/docs/changelog/102682.yaml deleted file mode 100644 index 190ff3df5a7f6..0000000000000 --- a/docs/changelog/102682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102682 -summary: Introduce fielddata cache ttl -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102710.yaml b/docs/changelog/102710.yaml deleted file mode 100644 index ee805c70180a0..0000000000000 --- a/docs/changelog/102710.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102710 -summary: Enable concurrency for multi terms agg -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102713.yaml b/docs/changelog/102713.yaml deleted file mode 100644 index 278d7d4ffb129..0000000000000 --- a/docs/changelog/102713.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102713 -summary: "ESQL: Add `profile` option" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102727.yaml b/docs/changelog/102727.yaml deleted file mode 100644 index 4f4d4fbf48899..0000000000000 --- a/docs/changelog/102727.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102727 -summary: "ESQL: Load stored fields sequentially" -area: ES|QL -type: enhancement -issues: [] diff --git 
a/docs/changelog/102731.yaml b/docs/changelog/102731.yaml deleted file mode 100644 index a12e04bfab078..0000000000000 --- a/docs/changelog/102731.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102731 -summary: Add internal inference action for ml models an services -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102735.yaml b/docs/changelog/102735.yaml deleted file mode 100644 index 4726e08d1f314..0000000000000 --- a/docs/changelog/102735.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102735 -summary: "[Profiling] Report in status API if docs exist" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102740.yaml b/docs/changelog/102740.yaml deleted file mode 100644 index b7fc10eb19ddb..0000000000000 --- a/docs/changelog/102740.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102740 -summary: "[Profiling] Notify early about task cancellation" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/102767.yaml b/docs/changelog/102767.yaml deleted file mode 100644 index cf1edeeb51265..0000000000000 --- a/docs/changelog/102767.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102767 -summary: "ESQL: remove `time_zone` request parameter" -area: ES|QL -type: bug -issues: - - 102159 diff --git a/docs/changelog/102806.yaml b/docs/changelog/102806.yaml deleted file mode 100644 index faa971ec1d879..0000000000000 --- a/docs/changelog/102806.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102806 -summary: Support for GET all models and by task type in the `_inference` API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102808.yaml b/docs/changelog/102808.yaml deleted file mode 100644 index 4e3df80a28319..0000000000000 --- a/docs/changelog/102808.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102808 -summary: Active shards message corrected for search shards -area: Distributed -type: bug -issues: - - 101896 diff --git a/docs/changelog/102810.yaml b/docs/changelog/102810.yaml deleted file mode 100644 index f5faf7a321dbc..0000000000000 --- a/docs/changelog/102810.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102810 -summary: Add memory utilization Kibana metric to the monitoring index templates -area: Monitoring -type: enhancement -issues: [] diff --git a/docs/changelog/102811.yaml b/docs/changelog/102811.yaml deleted file mode 100644 index 039a337a53e87..0000000000000 --- a/docs/changelog/102811.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102811 -summary: Split comma-separated source index strings into separate indices -area: Transform -type: bug -issues: - - 99564 diff --git a/docs/changelog/102832.yaml b/docs/changelog/102832.yaml deleted file mode 100644 index 7daf22263b2e9..0000000000000 --- a/docs/changelog/102832.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102832 -summary: Disable concurrency for sampler and diversified sampler -area: Aggregations -type: enhancement -issues: [] diff --git a/docs/changelog/102840.yaml b/docs/changelog/102840.yaml deleted file mode 100644 index 1d87cede632c9..0000000000000 --- a/docs/changelog/102840.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102840 -summary: Fail S3 repository analysis on partial reads -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/102844.yaml b/docs/changelog/102844.yaml deleted file mode 100644 index d05547c3aa9da..0000000000000 --- a/docs/changelog/102844.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102844 -summary: Skip global ordinals loading if query does not match after rewrite -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/102877.yaml 
b/docs/changelog/102877.yaml deleted file mode 100644 index da2de19b19a90..0000000000000 --- a/docs/changelog/102877.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102877 -summary: Add basic telelemetry for the inference feature -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102888.yaml b/docs/changelog/102888.yaml deleted file mode 100644 index 79ea9cbe712de..0000000000000 --- a/docs/changelog/102888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102888 -summary: "Optimize `_count` type API requests" -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/102901.yaml b/docs/changelog/102901.yaml deleted file mode 100644 index ac417691b525c..0000000000000 --- a/docs/changelog/102901.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102901 -summary: Introduce local block factory -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102902.yaml b/docs/changelog/102902.yaml deleted file mode 100644 index b33afdd35a603..0000000000000 --- a/docs/changelog/102902.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102902 -summary: Fast path for reading single doc with ordinals -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102906.yaml b/docs/changelog/102906.yaml deleted file mode 100644 index 3efaa2db58390..0000000000000 --- a/docs/changelog/102906.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102906 -summary: Introduce a `StreamOutput` that counts how many bytes are written to the - stream -area: Distributed -type: enhancement -issues: [] diff --git a/docs/changelog/102916.yaml b/docs/changelog/102916.yaml deleted file mode 100644 index 3943f34d91221..0000000000000 --- a/docs/changelog/102916.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102916 -summary: Fix layout for MV_EXPAND -area: ES|QL -type: bug -issues: - - 102912 diff --git a/docs/changelog/102919.yaml b/docs/changelog/102919.yaml deleted file mode 100644 index 0de2e75abc6cf..0000000000000 --- a/docs/changelog/102919.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102919 -summary: Error log when license verification fails locally -area: License -type: bug -issues: [] diff --git a/docs/changelog/102925.yaml b/docs/changelog/102925.yaml deleted file mode 100644 index 5dd15f4f60429..0000000000000 --- a/docs/changelog/102925.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102925 -summary: Add ldap user metadata mappings for full name and email -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/102937.yaml b/docs/changelog/102937.yaml deleted file mode 100644 index 116fbadebe09d..0000000000000 --- a/docs/changelog/102937.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102937 -summary: "ESQL: New telemetry commands" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/102944.yaml b/docs/changelog/102944.yaml deleted file mode 100644 index 58a1bb8f6bbaa..0000000000000 --- a/docs/changelog/102944.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102944 -summary: "If trained model download task is in progress, wait for it to finish before\ - \ executing start trained model deployment" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/102967.yaml b/docs/changelog/102967.yaml deleted file mode 100644 index cdde735f6c077..0000000000000 --- a/docs/changelog/102967.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102967 -summary: "ES|QL: Improve resolution error management in `mv_expand`" -area: ES|QL -type: bug -issues: - - 102964 diff --git a/docs/changelog/102994.yaml b/docs/changelog/102994.yaml deleted file mode 100644 index c35baaefcb723..0000000000000 --- 
a/docs/changelog/102994.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102994 -summary: Enable Connectors API as technical preview -area: Application -type: feature -issues: [] diff --git a/docs/changelog/103013.yaml b/docs/changelog/103013.yaml deleted file mode 100644 index bb8eb99088856..0000000000000 --- a/docs/changelog/103013.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103013 -summary: Deprecate the unused `elasticsearch_version` field of enrich policy json -area: Ingest Node -type: enhancement -issues: [] diff --git a/docs/changelog/103024.yaml b/docs/changelog/103024.yaml deleted file mode 100644 index e860ad056f980..0000000000000 --- a/docs/changelog/103024.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103024 -summary: Fix template simulate setting application ordering -area: Indices APIs -type: bug -issues: - - 103008 diff --git a/docs/changelog/103061.yaml b/docs/changelog/103061.yaml deleted file mode 100644 index 558429493ac6f..0000000000000 --- a/docs/changelog/103061.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103061 -summary: "[Profiling] Query in parallel only if beneficial" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103116.yaml b/docs/changelog/103116.yaml deleted file mode 100644 index 402c83e16ec37..0000000000000 --- a/docs/changelog/103116.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103116 -summary: Fix `frequent_item_sets` aggregation on empty index -area: Machine Learning -type: bug -issues: - - 103067 diff --git a/docs/changelog/103124.yaml b/docs/changelog/103124.yaml deleted file mode 100644 index 078c8249bbf5d..0000000000000 --- a/docs/changelog/103124.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103124 -summary: Start a new trace context before loading a trained model -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103150.yaml b/docs/changelog/103150.yaml deleted file mode 100644 index 3f42c882d89fb..0000000000000 --- a/docs/changelog/103150.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103150 -summary: "ES|QL: Fix NPE on single value detection" -area: ES|QL -type: bug -issues: - - 103141 diff --git a/docs/changelog/103183.yaml b/docs/changelog/103183.yaml deleted file mode 100644 index cb28033cff6a7..0000000000000 --- a/docs/changelog/103183.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103183 -summary: "[Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob`\ - \ parser" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103185.yaml b/docs/changelog/103185.yaml deleted file mode 100644 index 3a1a4960ba98c..0000000000000 --- a/docs/changelog/103185.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103185 -summary: Fix format string in `OldLuceneVersions` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103203.yaml b/docs/changelog/103203.yaml deleted file mode 100644 index d2aa3e9961c6a..0000000000000 --- a/docs/changelog/103203.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103203 -summary: Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103209.yaml b/docs/changelog/103209.yaml deleted file mode 100644 index 05ae8c13bcb5c..0000000000000 --- a/docs/changelog/103209.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103209 -summary: "ESQL: Fix `to_degrees()` returning infinity" -area: ES|QL -type: bug -issues: - - 102987 diff --git a/docs/changelog/103212.yaml b/docs/changelog/103212.yaml deleted file mode 100644 index 3cbbddc8f2229..0000000000000 --- a/docs/changelog/103212.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103212 -summary: Use 
the eql query filter for the open-pit request -area: EQL -type: enhancement -issues: [] diff --git a/docs/changelog/103251.yaml b/docs/changelog/103251.yaml deleted file mode 100644 index 0c5c6d6e4d776..0000000000000 --- a/docs/changelog/103251.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103251 -summary: Wait for reroute before acking put-shutdown -area: Infra/Node Lifecycle -type: bug -issues: [] diff --git a/docs/changelog/103339.yaml b/docs/changelog/103339.yaml deleted file mode 100644 index 6ea1ab0cf799a..0000000000000 --- a/docs/changelog/103339.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103339 -summary: "ESQL: Fix resolution of MV_EXPAND after KEEP *" -area: ES|QL -type: bug -issues: - - 103331 diff --git a/docs/changelog/103342.yaml b/docs/changelog/103342.yaml deleted file mode 100644 index 32711d7a6b390..0000000000000 --- a/docs/changelog/103342.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103342 -summary: Use dataset size instead of on-disk size for data stream stats -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/103361.yaml b/docs/changelog/103361.yaml deleted file mode 100644 index 441acc09895ef..0000000000000 --- a/docs/changelog/103361.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103361 -summary: Prevent attempts to access non-existent node information during rebalancing -area: Machine Learning -type: bug -issues: [ ] diff --git a/docs/changelog/103408.yaml b/docs/changelog/103408.yaml deleted file mode 100644 index bf5081b854f08..0000000000000 --- a/docs/changelog/103408.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103408 -summary: Cache component versions -area: Infra/Core -type: bug -issues: - - 102103 diff --git a/docs/changelog/103427.yaml b/docs/changelog/103427.yaml deleted file mode 100644 index 57a27aa687ab7..0000000000000 --- a/docs/changelog/103427.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103427 -summary: "[Connector API] Fix bug with nullable tooltip field in parser" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103430.yaml b/docs/changelog/103430.yaml deleted file mode 100644 index cd2444270849d..0000000000000 --- a/docs/changelog/103430.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103430 -summary: "[Connectors API] Fix bug with missing TEXT `DisplayType` enum" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103435.yaml b/docs/changelog/103435.yaml deleted file mode 100644 index 95e3c7169ada9..0000000000000 --- a/docs/changelog/103435.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103435 -summary: Dispatch `ClusterStateAction#buildResponse` to executor -area: Distributed -type: bug -issues: [] diff --git a/docs/changelog/103508.yaml b/docs/changelog/103508.yaml deleted file mode 100644 index 9c6f79ef75657..0000000000000 --- a/docs/changelog/103508.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103508 -summary: "[Connectors API] Fix `ClassCastException` when creating a new sync job" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/103530.yaml b/docs/changelog/103530.yaml deleted file mode 100644 index 6feb04467b03e..0000000000000 --- a/docs/changelog/103530.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103530 -summary: Exclude quantiles when fetching model snapshots where possible -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103546.yaml b/docs/changelog/103546.yaml deleted file mode 100644 index 08584e8555bd4..0000000000000 --- a/docs/changelog/103546.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103546 -summary: Handle timeout on standalone rewrite calls -area: Search -type: bug -issues: [] diff 
--git a/docs/changelog/103574.yaml b/docs/changelog/103574.yaml deleted file mode 100644 index ed6ad237f49a2..0000000000000 --- a/docs/changelog/103574.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103574 -summary: Samples should check if the aggregations result is empty or null -area: EQL -type: bug -issues: [] diff --git a/docs/changelog/103580.yaml b/docs/changelog/103580.yaml deleted file mode 100644 index 6fd0328017d1f..0000000000000 --- a/docs/changelog/103580.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103580 -summary: Copy counter field properties to downsampled index -area: Downsampling -type: bug -issues: - - 103569 diff --git a/docs/changelog/103591.yaml b/docs/changelog/103591.yaml deleted file mode 100644 index 41b6e362c5713..0000000000000 --- a/docs/changelog/103591.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 103591 -summary: Wait for the model results on graceful shutdown -area: Machine Learning -type: bug -issues: - - 103414 diff --git a/docs/changelog/103601.yaml b/docs/changelog/103601.yaml deleted file mode 100644 index bf7aaaf835e00..0000000000000 --- a/docs/changelog/103601.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 103601 -summary: Introduce Elasticsearch `PostingFormat` based on Lucene 90 positing format - using PFOR -area: Search -type: bug -issues: - - 103002 diff --git a/docs/changelog/103615.yaml b/docs/changelog/103615.yaml deleted file mode 100644 index 69498c749687f..0000000000000 --- a/docs/changelog/103615.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103615 -summary: Fix downsample api by returning a failure in case one or more downsample persistent tasks failed -area: Downsampling -type: bug -issues: [] diff --git a/docs/changelog/103670.yaml b/docs/changelog/103670.yaml deleted file mode 100644 index ad3f0519b5d19..0000000000000 --- a/docs/changelog/103670.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103670 -summary: "ESQL: Improve local folding of aggregates" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/103690.yaml b/docs/changelog/103690.yaml deleted file mode 100644 index fa9076789c1cd..0000000000000 --- a/docs/changelog/103690.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103690 -summary: Restore inter-segment search concurrency with synthetic source is enabled -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103873.yaml b/docs/changelog/103873.yaml deleted file mode 100644 index 937106043ecf4..0000000000000 --- a/docs/changelog/103873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103873 -summary: Catch exceptions during `pytorch_inference` startup -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/103923.yaml b/docs/changelog/103923.yaml deleted file mode 100644 index 80e6880909f3a..0000000000000 --- a/docs/changelog/103923.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103923 -summary: Preserve response headers in Datafeed preview -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/104029.yaml b/docs/changelog/104029.yaml deleted file mode 100644 index 2b74d3b634dba..0000000000000 --- a/docs/changelog/104029.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104029 -summary: '`AsyncOperator#isFinished` must never return true on failure' -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104046.yaml b/docs/changelog/104046.yaml deleted file mode 100644 index 9b383611b560a..0000000000000 --- a/docs/changelog/104046.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 104046 -summary: "ESQL: Update the use of some user-caused exceptions" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/104051.yaml 
b/docs/changelog/104051.yaml deleted file mode 100644 index 1aa6d69f5ae20..0000000000000 --- a/docs/changelog/104051.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 104051 -summary: Fix NPE that is thrown by `_update` API -area: Transform -type: bug -issues: - - 104048 diff --git a/docs/changelog/96968.yaml b/docs/changelog/96968.yaml deleted file mode 100644 index 8cc6d4ac4c284..0000000000000 --- a/docs/changelog/96968.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 96968 -summary: Allow prefix index naming while reindexing from remote -area: Reindex -type: bug -issues: - - 89120 diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml deleted file mode 100644 index e3eb7b5acc63f..0000000000000 --- a/docs/changelog/98874.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98874 -summary: Estimate the memory required to deploy trained models more accurately -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/98882.yaml b/docs/changelog/98882.yaml deleted file mode 100644 index 9867f098cfd13..0000000000000 --- a/docs/changelog/98882.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99983 -summary: Use non-deprecated SAML callback URL in tests -area: Authorization -type: enhancement -issues: - - 99985 diff --git a/docs/changelog/98883.yaml b/docs/changelog/98883.yaml deleted file mode 100644 index a8525a432d142..0000000000000 --- a/docs/changelog/98883.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99983 -summary: Use non-deprecated SAML callback URL in SAML smoketests -area: Authorization -type: enhancement -issues: - - 99986 diff --git a/docs/changelog/98916.yaml b/docs/changelog/98916.yaml deleted file mode 100644 index a466e3deba009..0000000000000 --- a/docs/changelog/98916.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98916 -summary: Make knn search a query -area: Vector Search -type: feature -issues: [] diff --git a/docs/changelog/99134.yaml b/docs/changelog/99134.yaml deleted file mode 100644 index 10156b9b30066..0000000000000 --- a/docs/changelog/99134.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99134 -summary: Add ability to create a data stream failure store -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/99445.yaml b/docs/changelog/99445.yaml deleted file mode 100644 index deea5fbf2423c..0000000000000 --- a/docs/changelog/99445.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99445 -summary: Make cosine similarity faster by storing magnitude and normalizing vectors -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99702.yaml b/docs/changelog/99702.yaml deleted file mode 100644 index 657ff34e045a8..0000000000000 --- a/docs/changelog/99702.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99702 -summary: Making classname optional in Transport protocol -area: Infra/Plugins -type: bug -issues: - - 98584 diff --git a/docs/changelog/99752.yaml b/docs/changelog/99752.yaml deleted file mode 100644 index c137a563bea39..0000000000000 --- a/docs/changelog/99752.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99752 -summary: Pass shard's primary term to Engine#addSegmentGenerationListener -area: Store -type: enhancement -issues: [] diff --git a/docs/changelog/99852.yaml b/docs/changelog/99852.yaml deleted file mode 100644 index 3a26f17737ae8..0000000000000 --- a/docs/changelog/99852.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99852 -summary: Record more detailed HTTP stats -area: Network -type: enhancement -issues: [] diff --git a/docs/changelog/99963.yaml b/docs/changelog/99963.yaml deleted file mode 100644 index 4f03dceeb22aa..0000000000000 --- a/docs/changelog/99963.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -pr: 99963 -summary: Aggs error codes part 1 -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/99975.yaml b/docs/changelog/99975.yaml deleted file mode 100644 index a34746c27ec99..0000000000000 --- a/docs/changelog/99975.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99975 -summary: Rename component templates and pipelines according to the new naming conventions -area: Indices APIs -type: enhancement -issues: [] diff --git a/docs/changelog/99984.yaml b/docs/changelog/99984.yaml deleted file mode 100644 index 254845591941d..0000000000000 --- a/docs/changelog/99984.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99984 -summary: Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size -area: ILM+SLM -type: enhancement -issues: - - 99983 From 58cfa5e05acc34a93af19a0dc8a5425fe5fb8b57 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 17 Jan 2024 15:03:22 -0500 Subject: [PATCH 90/95] Add final 8.12.0 docs --- .../reference/migration/migrate_8_12.asciidoc | 59 ++- docs/reference/release-notes/8.12.0.asciidoc | 396 +++++++++++++++++- 2 files changed, 452 insertions(+), 3 deletions(-) diff --git a/docs/reference/migration/migrate_8_12.asciidoc b/docs/reference/migration/migrate_8_12.asciidoc index d241a35c686d7..48e45de350890 100644 --- a/docs/reference/migration/migrate_8_12.asciidoc +++ b/docs/reference/migration/migrate_8_12.asciidoc @@ -16,5 +16,62 @@ coming::[8.12.0] [[breaking-changes-8.12]] === Breaking changes -There are no breaking changes in {es} 8.12. +There are no breaking changes in 8.12. + +[discrete] +[[notable-changes-8.12]] +=== Notable changes + +There are notable changes in 8.12 that you need to be aware of. Items that we may consider notable changes are: + +* Changes to features that are in Technical Preview. +* Changes to log formats. +* Changes to non-public APIs. +* Behaviour changes that repair critical bugs. + + +[discrete] +[[breaking_812_authorization_changes]] +==== Authorization changes + +[[fixed_jwt_principal_from_claims]] +.Fixed JWT principal from claims +[%collapsible] +==== +*Details* + +This changes the format of a JWT's principal before the JWT is actually validated by any JWT realm. The JWT's principal is a convenient way to refer to a JWT that has not yet been verified by a JWT realm. The JWT's principal is printed in the audit and regular logs (notably for auditing authn failures) as well as the smart realm chain reordering optimization. The JWT principal is NOT required to be identical to the JWT-authenticated user's principal, but in general, they should be similar. Previously, the JWT's principal was built by individual realms in the same way the realms built the authenticated user's principal. This had the advantage that, in simpler JWT realm configurations (e.g. a single JWT realm in the chain), the JWT principal and the authenticated user's principal are very similar. However, the drawback is that, in general, the JWT principal and the user principal can be very different (i.e. in the case where one JWT realm builds the JWT principal and a different one builds the user principal). Another downside is that the (unauthenticated) JWT principal depended on realm ordering, which makes identifying the JWT from its principal dependent on the ES authn realm configuration. This PR implements a consistent fixed logic to build the JWT principal, which only depends on the JWT's claims and no ES configuration.
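To make "depends only on the JWT's claims" concrete, the following is a minimal, hypothetical Java sketch of a claims-only principal builder. It is illustrative rather than the realm's actual logic: the claim precedence ("sub" first, qualified by "iss", then a few assumed fallback claims) is chosen for the example only.

import java.util.List;
import java.util.Map;

// Editorial sketch, not Elasticsearch code: derives a principal for an
// unverified JWT from its claims alone, so the result can never depend on
// realm order or any other cluster configuration. The fallback claim list
// below is an assumption for illustration.
final class JwtPrincipalSketch {
    private static final List<String> FALLBACK_CLAIMS = List.of("oid", "client_id", "azp");

    static String principalFromClaims(Map<String, Object> claims) {
        Object issuer = claims.get("iss");
        if (issuer == null) {
            return "<unrecognized jwt>"; // cannot qualify the principal without an issuer
        }
        Object subject = claims.get("sub");
        if (subject != null) {
            return issuer + "/" + subject; // issuer-qualified so principals never collide
        }
        for (String claim : FALLBACK_CLAIMS) {
            Object value = claims.get(claim);
            if (value != null) {
                return issuer + "/" + claim + ":" + value;
            }
        }
        return issuer + "/<no recognized subject claim>";
    }
}

Because no realm settings feed into the computation, every node derives the same principal for the same token, independent of realm ordering.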
+ +*Impact* + +Users will observe changed format and values for the `user.name` attribute of `authentication_failed` audit log events, in the JWT (failed) authn case. +==== + +[discrete] +[[breaking_812_java_api_changes]] +==== Java API changes + +[[plugin_createcomponents_method_has_been_refactored_to_take_single_pluginservices_object]] +.Plugin.createComponents method has been refactored to take a single PluginServices object +[%collapsible] +==== +*Details* + +Plugin.createComponents currently takes several different service arguments. The signature of this method changes every time a new service is added. The method has now been modified to take a single interface object that new services are added to. This will reduce API incompatibility issues when a new service is introduced in the future. + +*Impact* + +Plugins that override createComponents will need to be refactored to override the new method on ES 8.12+ +==== + +[discrete] +[[breaking_812_rest_api_changes]] +==== REST API changes + +[[es_ql_pow_function_always_returns_double]] +.[ES|QL] pow function always returns double +[%collapsible] +==== +*Details* + +In ES|QL, the pow function no longer returns the type of its inputs, instead always returning a double. + +*Impact* + +low. Most queries should continue to function with the change. +==== diff --git a/docs/reference/release-notes/8.12.0.asciidoc b/docs/reference/release-notes/8.12.0.asciidoc index 6355b7c5135db..21941b265ebd2 100644 --- a/docs/reference/release-notes/8.12.0.asciidoc +++ b/docs/reference/release-notes/8.12.0.asciidoc @@ -1,8 +1,400 @@ [[release-notes-8.12.0]] == {es} version 8.12.0 -coming[8.12.0] - Also see <>. +[[breaking-8.12.0]] +[float] +=== Breaking and notable changes + +Authorization:: +* Fixed JWT principal from claims {es-pull}101333[#101333] + +ES|QL:: +* [ES|QL] pow function always returns double {es-pull}102183[#102183] (issue: {es-issue}99055[#99055]) + +Infra/Plugins:: +* Remove Plugin.createComponents method in favour of overload with a PluginServices object {es-pull}101457[#101457] + +[[bug-8.12.0]] +[float] +=== Bug fixes + +Aggregations:: +* Adjust Histogram's bucket accounting to be iteratively {es-pull}102172[#102172] +* Aggs error codes part 1 {es-pull}99963[#99963] +* Skip global ordinals loading if query does not match after rewrite {es-pull}102844[#102844] +* Trigger parent circuit breaker when building scorers in filters aggregation {es-pull}102511[#102511] +* Unwrap `ExecutionException` when loading from cache in `AbstractIndexOrdinalsFieldData` {es-pull}102476[#102476] + +Application:: +* [Connector API] Fix bug in configuration validation parser {es-pull}104198[#104198] +* [Connector API] Fix bug with nullable tooltip field in parser {es-pull}103427[#103427] +* [Connectors API] Fix `ClassCastException` when creating a new sync job {es-pull}103508[#103508] +* [Connectors API] Fix bug with missing TEXT `DisplayType` enum {es-pull}103430[#103430] +* [Connectors API] Handle nullable fields correctly in the `ConnectorSyncJob` parser {es-pull}103183[#103183] +* [Profiling] Query in parallel only if beneficial {es-pull}103061[#103061] +* [Search Applications] Return 400 response when template rendering produces invalid JSON {es-pull}101474[#101474] + +Authentication:: +* Fall through malformed JWTs to subsequent realms in the chain {es-pull}101660[#101660] (issue: {es-issue}101367[#101367]) + +Authorization:: +* Fix cache invalidation on privilege modification {es-pull}102193[#102193] + +Data streams:: +* Use dataset size instead of 
on-disk size for data stream stats {es-pull}103342[#103342] + +Distributed:: +* Active shards message corrected for search shards {es-pull}102808[#102808] (issue: {es-issue}101896[#101896]) +* Dispatch `ClusterStateAction#buildResponse` to executor {es-pull}103435[#103435] +* Fix listeners in `SharedBlobCacheService.readMultiRegions` {es-pull}101727[#101727] + +Downsampling:: +* Copy counter field properties to downsampled index {es-pull}103580[#103580] (issue: {es-issue}103569[#103569]) +* Fix downsample api by returning a failure in case one or more downsample persistent tasks failed {es-pull}103615[#103615] + +EQL:: +* Cover head/tail commands edge cases and data types coverage {es-pull}101859[#101859] (issue: {es-issue}101724[#101724]) +* Samples should check if the aggregations result is empty or null {es-pull}103574[#103574] + +ES|QL:: +* ESQL: Fix `to_degrees()` returning infinity {es-pull}103209[#103209] (issue: {es-issue}102987[#102987]) +* ESQL: Fix planning of MV_EXPAND with foldable expressions {es-pull}101385[#101385] (issue: {es-issue}101118[#101118]) +* ESQL: Fix rare bug with empty string {es-pull}102350[#102350] (issue: {es-issue}101969[#101969]) +* ESQL: Fix resolution of MV_EXPAND after KEEP * {es-pull}103339[#103339] (issue: {es-issue}103331[#103331]) +* ESQL: Fix single value query {es-pull}102317[#102317] (issue: {es-issue}102298[#102298]) +* ESQL: Improve local folding of aggregates {es-pull}103670[#103670] +* ESQL: Improve pushdown of certain filters {es-pull}103671[#103671] +* ESQL: Narrow catch in convert functions {es-pull}101788[#101788] (issue: {es-issue}100820[#100820]) +* ESQL: Update the use of some user-caused exceptions {es-pull}104046[#104046] +* ESQL: remove `time_zone` request parameter {es-pull}102767[#102767] (issue: {es-issue}102159[#102159]) +* ES|QL: Fix NPE on single value detection {es-pull}103150[#103150] (issue: {es-issue}103141[#103141]) +* ES|QL: Improve resolution error management in `mv_expand` {es-pull}102967[#102967] (issue: {es-issue}102964[#102964]) +* Fix layout for MV_EXPAND {es-pull}102916[#102916] (issue: {es-issue}102912[#102912]) +* Fix planning of duplicate aggs {es-pull}102165[#102165] (issue: {es-issue}102083[#102083]) +* `AsyncOperator#isFinished` must never return true on failure {es-pull}104029[#104029] + +Engine:: +* Fix `lastUnsafeSegmentGenerationForGets` for realtime get {es-pull}101700[#101700] + +Geo:: +* Fix geo tile bounding boxes to be consistent with arithmetic method {es-pull}100826[#100826] (issues: {es-issue}92611[#92611], {es-issue}95574[#95574]) + +ILM+SLM:: +* Collect data tiers usage stats more efficiently {es-pull}102140[#102140] (issue: {es-issue}100230[#100230]) + +Indices APIs:: +* Fix template simulate setting application ordering {es-pull}103024[#103024] (issue: {es-issue}103008[#103008]) + +Infra/Core:: +* Cache component versions {es-pull}103408[#103408] (issue: {es-issue}102103[#102103]) +* Fix metric gauge creation model {es-pull}100609[#100609] + +Infra/Node Lifecycle:: +* Wait for reroute before acking put-shutdown {es-pull}103251[#103251] + +Infra/Plugins:: +* Making classname optional in Transport protocol {es-pull}99702[#99702] (issue: {es-issue}98584[#98584]) + +Infra/Scripting:: +* Make IPAddress writeable {es-pull}101093[#101093] (issue: {es-issue}101082[#101082]) + +Infra/Settings:: +* Report full stack trace for non-state file settings transforms {es-pull}101346[#101346] + +Ingest Node:: +* Sending an index name to `DocumentParsingObserver` that is not ever null {es-pull}100862[#100862] + 
+License:: +* Error log when license verification fails locally {es-pull}102919[#102919] + +Machine Learning:: +* Catch exceptions during `pytorch_inference` startup {es-pull}103873[#103873] +* Ensure the estimated latitude is within the allowed range {ml-pull}2586[#2586] +* Exclude quantiles when fetching model snapshots where possible {es-pull}103530[#103530] +* Fix `frequent_item_sets` aggregation on empty index {es-pull}103116[#103116] (issue: {es-issue}103067[#103067]) +* If trained model download task is in progress, wait for it to finish before executing start trained model deployment {es-pull}102944[#102944] +* Persist data counts on job close before results index refresh {es-pull}101147[#101147] +* Preserve response headers in Datafeed preview {es-pull}103923[#103923] +* Prevent attempts to access non-existent node information during rebalancing {es-pull}103361[#103361] +* Prevent resource over-subscription in model allocation planner {es-pull}100392[#100392] +* Remove dependency on the IPEX library {ml-pull}2605[#2605] and {ml-pull}2606[#2606] +* Start a new trace context before loading a trained model {es-pull}103124[#103124] +* Wait for the model results on graceful shutdown {es-pull}103591[#103591] (issue: {es-issue}103414[#103414]) + +Monitoring:: +* [Monitoring] Dont get cluster state until recovery {es-pull}100565[#100565] + +Network:: +* Ensure the correct `threadContext` for `RemoteClusterNodesAction` {es-pull}101050[#101050] + +Ranking:: +* Add an additional tiebreaker to RRF {es-pull}101847[#101847] (issue: {es-issue}101232[#101232]) + +Reindex:: +* Allow prefix index naming while reindexing from remote {es-pull}96968[#96968] (issue: {es-issue}89120[#89120]) + +Search:: +* Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` {es-pull}103112[#103112] +* Check that scripts produce correct json in render template action {es-pull}101518[#101518] (issue: {es-issue}101477[#101477]) +* Fix NPE & empty result handling in `CountOnlyQueryPhaseResultConsumer` {es-pull}103203[#103203] +* Fix format string in `OldLuceneVersions` {es-pull}103185[#103185] +* Handle timeout on standalone rewrite calls {es-pull}103546[#103546] +* Introduce Elasticsearch `PostingFormat` based on Lucene 90 positing format using PFOR {es-pull}103601[#103601] (issue: {es-issue}103002[#103002]) +* Restore inter-segment search concurrency with synthetic source is enabled {es-pull}103690[#103690] +* Support complex datemath expressions in index and index alias names {es-pull}100646[#100646] + +Snapshot/Restore:: +* More consistent logging messages for snapshot deletion {es-pull}101024[#101024] +* Reroute on shard snapshot completion {es-pull}101585[#101585] (issue: {es-issue}101514[#101514]) + +TSDB:: +* Throw when wrapping rate agg in `DeferableBucketAggregator` {es-pull}101032[#101032] + +Transform:: +* Add an assertion to the testTransformFeatureReset test case {es-pull}100287[#100287] +* Consider search context missing exceptions as recoverable {es-pull}102602[#102602] +* Consider task cancelled exceptions as recoverable {es-pull}100828[#100828] +* Fix NPE that is thrown by `_update` API {es-pull}104051[#104051] (issue: {es-issue}104048[#104048]) +* Log stacktrace together with log message in order to help debugging {es-pull}101607[#101607] +* Split comma-separated source index strings into separate indices {es-pull}102811[#102811] (issue: {es-issue}99564[#99564]) + +Vector Search:: +* Disallow vectors whose magnitudes will not fit in a float {es-pull}100519[#100519] + +Watcher:: +* 
Correctly logging watcher history write failures {es-pull}101802[#101802] + +[[enhancement-8.12.0]] +[float] +=== Enhancements + +Aggregations:: +* Check the real memory circuit breaker when building global ordinals {es-pull}102462[#102462] +* Disable concurrency for sampler and diversified sampler {es-pull}102832[#102832] +* Disable parallelism for composite agg against high cardinality fields {es-pull}102644[#102644] +* Enable concurrency for multi terms agg {es-pull}102710[#102710] +* Enable concurrency for scripted metric agg {es-pull}102461[#102461] +* Enable inter-segment concurrency for terms aggs {es-pull}101390[#101390] +* Export circuit breaker trip count as a counter metric {es-pull}101423[#101423] +* Introduce fielddata cache ttl {es-pull}102682[#102682] +* Status codes for Aggregation errors, part 2 {es-pull}100368[#100368] +* Support keyed histograms {es-pull}101826[#101826] (issue: {es-issue}100242[#100242]) + +Allocation:: +* Add more desired balance stats {es-pull}102065[#102065] +* Add undesired shard count {es-pull}101426[#101426] +* Expose reconciliation metrics via APM {es-pull}102244[#102244] + +Application:: +* Calculate CO2 and emmission and costs {es-pull}101979[#101979] +* Consider duplicate stacktraces in custom index {es-pull}102292[#102292] +* Enable Universal Profiling as Enterprise feature {es-pull}100333[#100333] +* Include totals in flamegraph response {es-pull}101126[#101126] +* Retrieve stacktrace events from a custom index {es-pull}102020[#102020] +* [Profiling] Notify early about task cancellation {es-pull}102740[#102740] +* [Profiling] Report in status API if docs exist {es-pull}102735[#102735] + +Authentication:: +* Add ldap user metadata mappings for full name and email {es-pull}102925[#102925] +* Add manage_enrich cluster privilege to kibana_system role {es-pull}101682[#101682] + +Authorization:: +* Remove `auto_configure` privilege for profiling {es-pull}101026[#101026] +* Use `BulkRequest` to store Application Privileges {es-pull}102056[#102056] +* Use non-deprecated SAML callback URL in SAML smoketests {es-pull}99983[#99983] (issue: {es-issue}99986[#99986]) +* Use non-deprecated SAML callback URL in tests {es-pull}99983[#99983] (issue: {es-issue}99985[#99985]) + +CAT APIs:: +* Expose roles by default in cat allocation API {es-pull}101753[#101753] + +CRUD:: +* Cache resolved index for mgets {es-pull}101311[#101311] + +Data streams:: +* Introduce new endpoint to expose data stream lifecycle stats {es-pull}101845[#101845] +* Switch logs data streams to search all fields by default {es-pull}102456[#102456] (issue: {es-issue}99872[#99872]) + +Distributed:: +* Add support for configuring proxy scheme in S3 client settings and EC2 discovery plugin {es-pull}102495[#102495] (issue: {es-issue}101873[#101873]) +* Introduce a `StreamOutput` that counts how many bytes are written to the stream {es-pull}102906[#102906] +* Push s3 requests count via metrics API {es-pull}100383[#100383] +* Record operation purpose for s3 stats collection {es-pull}100236[#100236] + +EQL:: +* Add error logging for *QL {es-pull}101057[#101057] +* Use the eql query filter for the open-pit request {es-pull}103212[#103212] + +ES|QL:: +* ESQL: Add `profile` option {es-pull}102713[#102713] +* ESQL: Alias duplicated aggregations in a stats {es-pull}100642[#100642] (issue: {es-issue}100544[#100544]) +* ESQL: Load more than one field at once {es-pull}102192[#102192] +* ESQL: Load stored fields sequentially {es-pull}102727[#102727] +* ESQL: Load text field from parent keyword field 
{es-pull}102490[#102490] (issue: {es-issue}102473[#102473]) +* ESQL: Make blocks ref counted {es-pull}100408[#100408] +* ESQL: Make fieldcaps calls lighter {es-pull}102510[#102510] (issues: {es-issue}101763[#101763], {es-issue}102393[#102393]) +* ESQL: More tracking in `BlockHash` impls {es-pull}101488[#101488] +* ESQL: New telemetry commands {es-pull}102937[#102937] +* ESQL: Share constant null Blocks {es-pull}102673[#102673] +* ESQL: Short circuit loading empty doc values {es-pull}102434[#102434] +* ESQL: Support the `_source` metadata field {es-pull}102391[#102391] +* ESQL: Track blocks emitted from lucene {es-pull}101396[#101396] +* ESQL: Track memory from values loaded from lucene {es-pull}101383[#101383] +* Fast path for reading single doc with ordinals {es-pull}102902[#102902] +* Introduce local block factory {es-pull}102901[#102901] +* Load different way {es-pull}101235[#101235] +* Track ESQL enrich memory {es-pull}102184[#102184] +* Track blocks in `AsyncOperator` {es-pull}102188[#102188] +* Track blocks of intermediate state of aggs {es-pull}102562[#102562] +* Track blocks when hashing single multi-valued field {es-pull}102612[#102612] +* Track pages in ESQL enrich request/response {es-pull}102190[#102190] + +Engine:: +* Add static node settings to set default values for max merged segment sizes {es-pull}102208[#102208] + +Geo:: +* Add runtime field of type `geo_shape` {es-pull}100492[#100492] (issue: {es-issue}61299[#61299]) + +Health:: +* Add message field to `HealthPeriodicLogger` and `S3RequestRetryStats` {es-pull}101989[#101989] +* Add non-green indicator names to `HealthPeriodicLogger` message {es-pull}102245[#102245] + +ILM+SLM:: +* Health Report API should not return RED for unassigned cold/frozen shards when data is available {es-pull}100776[#100776] +* Switch fleet's built-in ILM policies to use .actions.rollover.max_primary_shard_size {es-pull}99984[#99984] (issue: {es-issue}99983[#99983]) + +Indices APIs:: +* Add executed pipelines to bulk api response {es-pull}100031[#100031] +* Add support for marking component templates as deprecated {es-pull}101148[#101148] (issue: {es-issue}100992[#100992]) +* Allowing non-dynamic index settings to be updated by automatically unassigning shards {es-pull}101723[#101723] +* Rename component templates and pipelines according to the new naming conventions {es-pull}99975[#99975] +* Run `TransportGetAliasesAction` on local node {es-pull}101815[#101815] + +Infra/CLI:: +* Set `ActiveProcessorCount` when `node.processors` is set {es-pull}101846[#101846] + +Infra/Core:: +* Add apm api for asynchronous counters (always increasing) {es-pull}102598[#102598] +* Log errors in `RestResponse` regardless of `error_trace` parameter {es-pull}101066[#101066] (issue: {es-issue}100884[#100884]) + +Infra/Logging:: +* Add status code to `rest.suppressed` log output {es-pull}100990[#100990] + +Ingest Node:: +* Deprecate the unused `elasticsearch_version` field of enrich policy json {es-pull}103013[#103013] +* Optimize `MurmurHash3` {es-pull}101202[#101202] + +Machine Learning:: +* Accept a single or multiple inputs to `_inference` {es-pull}102075[#102075] +* Add basic telelemetry for the inference feature {es-pull}102877[#102877] +* Add internal inference action for ml models an services {es-pull}102731[#102731] +* Add prefix strings option to trained models {es-pull}102089[#102089] +* Estimate the memory required to deploy trained models more accurately {es-pull}98874[#98874] +* Improve stability of spike and dip detection for the change point 
aggregation {es-pull}102637[#102637] +* Include ML processor limits in `_ml/info` response {es-pull}101392[#101392] +* Read scores from downloaded vocabulary for XLM Roberta tokenizers {es-pull}101868[#101868] +* Support for GET all models and by task type in the `_inference` API {es-pull}102806[#102806] +* Upgrade Boost libraries to version 1.83 {ml-pull}2560[#2560] + +Mapping:: +* Improve analyzer reload log message {es-pull}102273[#102273] + +Monitoring:: +* Add memory utilization Kibana metric to the monitoring index templates {es-pull}102810[#102810] +* Added `beat.stats.libbeat.pipeline.queue.max_events` {es-pull}102570[#102570] + +Network:: +* Record more detailed HTTP stats {es-pull}99852[#99852] + +Search:: +* Add metrics to the shared blob cache {es-pull}101577[#101577] +* Add support for Serbian Language Analyzer {es-pull}100921[#100921] +* Add support for `index_filter` to open pit {es-pull}102388[#102388] (issue: {es-issue}99740[#99740]) +* Added metric for cache eviction of entries with non zero frequency {es-pull}100570[#100570] +* Disable inter-segment concurrency when sorting by field {es-pull}101535[#101535] +* Enable query phase parallelism within a single shard {es-pull}101230[#101230] (issue: {es-issue}80693[#80693]) +* Node stats as metrics {es-pull}102248[#102248] +* Optimize `_count` type API requests {es-pull}102888[#102888] + +Security:: +* Expose the `invalidation` field in Get/Query `ApiKey` APIs {es-pull}102472[#102472] +* Make `api_key.delete.interval` a dynamic setting {es-pull}102680[#102680] + +Snapshot/Restore:: +* Fail S3 repository analysis on partial reads {es-pull}102840[#102840] +* Parallelize stale index deletion {es-pull}100316[#100316] (issue: {es-issue}61513[#61513]) +* Repo analysis of uncontended register behaviour {es-pull}101185[#101185] +* Repo analysis: allow configuration of register ops {es-pull}102051[#102051] +* Repo analysis: verify empty register {es-pull}102048[#102048] + +Stats:: +* Introduce includeShardsStats in the stats request to indicate that we only fetch a summary {es-pull}100466[#100466] (issue: {es-issue}99744[#99744]) +* Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics {es-pull}100938[#100938] + +Store:: +* Add methods for adding generation listeners with primary term {es-pull}100899[#100899] +* Allow executing multiple periodic flushes while they are being made durable {es-pull}102571[#102571] +* Pass shard's primary term to Engine#addSegmentGenerationListener {es-pull}99752[#99752] + +Transform:: +* Implement exponential backoff for transform state persistence retrying {es-pull}102512[#102512] (issue: {es-issue}102528[#102528]) +* Make tasks that calculate checkpoints time out {es-pull}101055[#101055] +* Pass source query to `_field_caps` (as `index_filter`) when deducing destination index mappings for better performance {es-pull}102379[#102379] +* Pass transform source query as `index_filter` to `open_point_in_time` request {es-pull}102447[#102447] (issue: {es-issue}101049[#101049]) +* Skip shards that don't match the source query during checkpointing {es-pull}102138[#102138] + +Vector Search:: +* Add vector_operation_count in profile output for knn searches {es-pull}102032[#102032] +* Make cosine similarity faster by storing magnitude and normalizing vectors {es-pull}99445[#99445] + +[[feature-8.12.0]] +[float] +=== New features + +Application:: +* Enable Connectors API as technical preview {es-pull}102994[#102994] +* [Behavioral Analytics] Analytics collections 
use Data Stream Lifecycle (DSL) instead of Index Lifecycle Management (ILM) for data retention management. Behavioral analytics has traditionally used ILM to manage data retention. Starting with 8.12.0, this will change. Analytics collections created prior to 8.12.0 will continue to use their existing ILM policies, but new analytics collections will be managed using DSL. {es-pull}100033[#100033] + +Authentication:: +* Patterns support for allowed subjects by the JWT realm {es-pull}102426[#102426] + +Cluster Coordination:: +* Add a node feature join barrier. This prevents nodes from joining clusters that do not have all the features already present in the cluster. This ensures that once a features is supported by all the nodes in a cluster, that feature will never then not be supported in the future. This is the corresponding functionality for the version join barrier, but for features + {es-pull}101609[#101609] + +Data streams:: +* Add ability to create a data stream failure store {es-pull}99134[#99134] + +ES|QL:: +* ESQL: emit warnings from single-value functions processing multi-values {es-pull}102417[#102417] (issue: {es-issue}98743[#98743]) +* GEO_POINT and CARTESIAN_POINT type support {es-pull}102177[#102177] + +Infra/Core:: +* Create new cluster state API for querying features present on a cluster {es-pull}100974[#100974] + +Ingest Node:: +* Adding a simulate ingest api {es-pull}101409[#101409] + +Security:: +* Allow granting API keys with JWT as the access_token {es-pull}101904[#101904] + +Vector Search:: +* Add byte quantization for float vectors in HNSW {es-pull}102093[#102093] +* Make knn search a query {es-pull}98916[#98916] + +[[regression-8.12.0]] +[float] +=== Regressions + +Infra/Core:: +* Revert non-semantic `NodeInfo` {es-pull}102636[#102636] + +[[upgrade-8.12.0]] +[float] +=== Upgrades + +Search:: +* Upgrade to Lucene 9.9.1 {es-pull}103549[#103549] + From 37fcadb0aebf14fe38f90783d4f9e43ef58f41e1 Mon Sep 17 00:00:00 2001 From: Henrik Nordvik Date: Wed, 17 Jan 2024 12:40:17 -0800 Subject: [PATCH 91/95] Add mapping for total_data_set_size (#104432) --- .../src/main/resources/monitoring-es-mb.json | 34 +++++++++++++++++++ .../MonitoringTemplateRegistry.java | 2 +- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json index b3d6dc3936d59..233c170890d40 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es-mb.json @@ -101,6 +101,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } } @@ -623,6 +630,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, @@ -1253,6 +1267,9 @@ "properties": { "size_in_bytes": { "type": "long" + }, + "total_data_set_size_in_bytes": { + "type": "long" } } }, @@ -1410,6 +1427,9 @@ }, "size_in_bytes": { "type": "long" + }, + "total_data_set_size_in_bytes": { + "type": "long" } } }, @@ -1704,6 +1724,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, @@ -1828,6 +1855,13 @@ "type": "long" } } + }, + "total_data_set_size": { + "properties": { + "bytes": { + "type": "long" + } + } } } }, diff --git 
a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index eaec54ca9c1a3..753700a7ec913 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. */ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 13; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 14; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; From ebee04694768c3530cd480eb41dbf6bd395ebbac Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Jan 2024 15:52:34 -0500 Subject: [PATCH 92/95] ESQL: Fix function registry on windows (#104475) The registry was replacing LINE_SEPARATOR with " ". But the files *always* contain `\n`. Never `\r\n`. Closes #104342 --- .../xpack/esql/expression/function/EsqlFunctionRegistry.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java index 0264d2b42eb35..b3229f1c36c2b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java @@ -264,7 +264,7 @@ public static FunctionDescription description(FunctionDefinition def) { } Constructor constructor = constructors[0]; FunctionInfo functionInfo = constructor.getAnnotation(FunctionInfo.class); - String functionDescription = functionInfo == null ? "" : functionInfo.description().replaceAll(System.lineSeparator(), " "); + String functionDescription = functionInfo == null ? "" : functionInfo.description().replaceAll("\n", " "); String[] returnType = functionInfo == null ? new String[] { "?" } : functionInfo.returnType(); var params = constructor.getParameters(); // no multiple c'tors supported @@ -277,7 +277,7 @@ public static FunctionDescription description(FunctionDefinition def) { String name = paramInfo == null ? params[i].getName() : paramInfo.name(); variadic |= List.class.isAssignableFrom(params[i].getType()); String[] type = paramInfo == null ? new String[] { "?" } : paramInfo.type(); - String desc = paramInfo == null ? "" : paramInfo.description().replaceAll(System.lineSeparator(), " "); + String desc = paramInfo == null ? "" : paramInfo.description().replaceAll("\n", " "); boolean optional = paramInfo == null ? 
false : paramInfo.optional(); args.add(new EsqlFunctionRegistry.ArgSignature(name, type, desc, optional)); From 1fdf2b544fb279f69567573f19d6e2cfcbc564ea Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 17 Jan 2024 16:07:30 -0500 Subject: [PATCH 93/95] Remove 8.11 from active branches --- branches.json | 3 --- 1 file changed, 3 deletions(-) diff --git a/branches.json b/branches.json index b33bb30e77cc4..289928f13daf7 100644 --- a/branches.json +++ b/branches.json @@ -7,9 +7,6 @@ { "branch": "8.12" }, - { - "branch": "8.11" - }, { "branch": "7.17" } From 42bda44adf1accf42e2c334bf134728c8424f547 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 17 Jan 2024 16:31:01 -0500 Subject: [PATCH 94/95] Fix async yaml tests (#104488) This fixes a problem with the async tests for `drop_null_columns` caused by us not passing the option when fetching from the async index. This option wasn't actually supported so I had to plumb that through as well. --- .../rest-api-spec/api/esql.async_query_get.json | 5 +++++ .../single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java | 9 ++++++++- .../xpack/esql/action/RestEsqlGetAsyncResultAction.java | 8 ++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json index bf38522cfb448..c4670758f7fe9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/esql.async_query_get.json @@ -33,6 +33,11 @@ "keep_alive": { "type": "time", "description": "Specify the time interval in which the results (partial or final) for this search will be available" + }, + "drop_null_columns": { + "type": "boolean", + "description": "Should entirely null columns be removed from the results? Their name and type will be returned in a new `all_columns` section.", + "default": false + } } } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java index 34eb2421b0432..0f2bf2703f62f 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlAsyncSubmitAndFetchIT.java @@ -87,9 +87,16 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx /* * Ok, we didn't finish before the timeout. Fine, let's fetch the result.
*/ + Map params = new HashMap<>(); + params.put("wait_for_completion_timeout", "30m"); + params.put("id", id); + String dropNullColumns = original.getApiCallSection().getParams().get("drop_null_columns"); + if (dropNullColumns != null) { + params.put("drop_null_columns", dropNullColumns); + } ClientYamlTestResponse fetchResponse = executionContext.callApi( "esql.async_query_get", - Map.of("wait_for_completion_timeout", "30m", "id", id), + params, List.of(), original.getApiCallSection().getHeaders(), original.getApiCallSection().getNodeSelector() diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java index 35a679e23d1f7..b5a1821350e5e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java @@ -16,8 +16,11 @@ import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import java.util.List; +import java.util.Set; import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.xpack.esql.action.EsqlQueryResponse.DROP_NULL_COLUMNS_OPTION; +import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; @ServerlessScope(Scope.PUBLIC) public class RestEsqlGetAsyncResultAction extends BaseRestHandler { @@ -42,4 +45,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli } return channel -> client.execute(EsqlAsyncGetResultAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel)); } + + @Override + protected Set responseParams() { + return Set.of(URL_PARAM_DELIMITER, DROP_NULL_COLUMNS_OPTION); + } } From 833469cfb0d72f8da0558d5cda01cb3ac607a664 Mon Sep 17 00:00:00 2001 From: Mike Pellegrini Date: Wed, 17 Jan 2024 18:40:05 -0500 Subject: [PATCH 95/95] Store semantic_text model info in mappings (#103319) Store semantic_text model info in IndexMetadata: On document ingestion, we need to perform inference only once, in the coordinating node. Otherwise, we would be doing inference for each of the shards the document is stored in. The problem with the coordinating node is that it doesn't necessarily hold mapping information if it is not used for storing an index. A pure coordinating node doesn't have any mapping information at all. We need to understand when we need to generate text embeddings on the coordinating node. This means that the model information associated with index fields needs to be efficiently accessed from there. This information needs to be kept up to date with mapping changes, and not be recomputed otherwise. The model / fields information is going to be included as part of the IndexMetadata, to ensure it is communicated to all nodes in the cluster. 
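As a rough illustration of that design, the sketch below shows how a coordinating node could consult the new map during ingest. The getFieldsForModels() accessor is the one this patch adds to IndexMetadata; the helper itself, its name, and the document-as-map shape are assumptions made for the example, not the ingest wiring the commit describes.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;

// Sketch only: decides on the coordinating node, from cluster state alone,
// which (model, field) pairs need an embedding before a document is routed.
final class CoordinatingInferenceSketch {

    record InferenceTarget(String modelId, String fieldName) {}

    static List<InferenceTarget> inferenceTargets(ClusterState state, String index, Map<String, Object> document) {
        List<InferenceTarget> targets = new ArrayList<>();
        IndexMetadata indexMetadata = state.metadata().index(index);
        if (indexMetadata == null) {
            return targets; // index not created yet, nothing to look up
        }
        // Model ID -> fields using that model, maintained alongside mapping
        // updates by the create-index and put-mapping services in this diff.
        for (Map.Entry<String, Set<String>> entry : indexMetadata.getFieldsForModels().entrySet()) {
            for (String field : entry.getValue()) {
                if (document.containsKey(field)) {
                    targets.add(new InferenceTarget(entry.getKey(), field));
                }
            }
        }
        return targets; // call the inference service once per target, then route to shards
    }
}

Since the map lives in IndexMetadata, it travels with ordinary cluster state publication, so even a pure coordinating node observes updates as soon as a mapping change commits.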
--- .../cluster/ClusterStateDiffIT.java | 26 ++++- .../org/elasticsearch/TransportVersions.java | 1 + .../cluster/metadata/IndexMetadata.java | 99 +++++++++++++++++-- .../metadata/MetadataCreateIndexService.java | 2 + .../metadata/MetadataMappingService.java | 1 + .../index/mapper/FieldTypeLookup.java | 19 ++++ .../index/mapper/MappingLookup.java | 4 + .../cluster/metadata/IndexMetadataTests.java | 37 ++++++- .../index/mapper/FieldTypeLookupTests.java | 26 +++++ .../index/mapper/MappingLookupTests.java | 19 ++++ .../metadata/DataStreamTestHelper.java | 3 +- .../mapper/MockInferenceModelFieldType.java | 45 +++++++++ .../SemanticTextClusterMetadataTests.java | 54 ++++++++++ .../xpack/ml/LocalStateMachineLearning.java | 6 ++ 14 files changed, 331 insertions(+), 11 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java create mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index b869b3a90fbce..433b4bdaf5d98 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -55,6 +55,7 @@ import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -571,7 +572,7 @@ public IndexMetadata randomCreate(String name) { @Override public IndexMetadata randomChange(IndexMetadata part) { IndexMetadata.Builder builder = IndexMetadata.builder(part); - switch (randomIntBetween(0, 2)) { + switch (randomIntBetween(0, 3)) { case 0: builder.settings(Settings.builder().put(part.getSettings()).put(randomSettings(Settings.EMPTY))); break; @@ -585,11 +586,34 @@ public IndexMetadata randomChange(IndexMetadata part) { case 2: builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)); break; + case 3: + builder.fieldsForModels(randomFieldsForModels()); + break; default: throw new IllegalArgumentException("Shouldn't be here"); } return builder.build(); } + + /** + * Generates a random fieldsForModels map + */ + private Map> randomFieldsForModels() { + if (randomBoolean()) { + return null; + } + + Map> fieldsForModels = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 5); i++) { + Set fields = new HashSet<>(); + for (int j = 0; j < randomIntBetween(1, 4); j++) { + fields.add(randomAlphaOfLengthBetween(4, 10)); + } + fieldsForModels.put(randomAlphaOfLengthBetween(4, 10), fields); + } + + return fieldsForModels; + } }); } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index a730587f32c20..c914eac4927a0 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -188,6 +188,7 @@ static TransportVersion def(int id) { public static final TransportVersion PEERFINDER_REPORTS_PEERS_MASTERS = def(8_575_00_0); public static final TransportVersion ESQL_MULTI_CLUSTERS_ENRICH = def(8_576_00_0); public static final TransportVersion NESTED_KNN_MORE_INNER_HITS = def(8_577_00_0); + public static final TransportVersion SEMANTIC_TEXT_FIELD_ADDED = def(8_578_00_0); /* * 
STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 83b1c48e69eb9..a95c3e905d5f4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -78,6 +78,7 @@ import java.util.OptionalLong; import java.util.Set; import java.util.function.Function; +import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.Metadata.CONTEXT_MODE_PARAM; import static org.elasticsearch.cluster.metadata.Metadata.DEDUPLICATED_MAPPINGS_PARAM; @@ -540,6 +541,8 @@ public Iterator> settings() { public static final String KEY_SHARD_SIZE_FORECAST = "shard_size_forecast"; + public static final String KEY_FIELDS_FOR_MODELS = "fields_for_models"; + public static final String INDEX_STATE_FILE_PREFIX = "state-"; static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersions.V_7_10_0; @@ -629,6 +632,8 @@ public Iterator> settings() { private final Double writeLoadForecast; @Nullable private final Long shardSizeInBytesForecast; + // Key: model ID, Value: Fields that use model + private final ImmutableOpenMap> fieldsForModels; private IndexMetadata( final Index index, @@ -674,7 +679,8 @@ private IndexMetadata( final IndexVersion indexCompatibilityVersion, @Nullable final IndexMetadataStats stats, @Nullable final Double writeLoadForecast, - @Nullable Long shardSizeInBytesForecast + @Nullable Long shardSizeInBytesForecast, + final ImmutableOpenMap> fieldsForModels ) { this.index = index; this.version = version; @@ -730,6 +736,7 @@ private IndexMetadata( this.writeLoadForecast = writeLoadForecast; this.shardSizeInBytesForecast = shardSizeInBytesForecast; assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; + this.fieldsForModels = Objects.requireNonNull(fieldsForModels); } IndexMetadata withMappingMetadata(MappingMetadata mapping) { @@ -780,7 +787,8 @@ IndexMetadata withMappingMetadata(MappingMetadata mapping) { this.indexCompatibilityVersion, this.stats, this.writeLoadForecast, - this.shardSizeInBytesForecast + this.shardSizeInBytesForecast, + this.fieldsForModels ); } @@ -838,7 +846,8 @@ public IndexMetadata withInSyncAllocationIds(int shardId, Set inSyncSet) this.indexCompatibilityVersion, this.stats, this.writeLoadForecast, - this.shardSizeInBytesForecast + this.shardSizeInBytesForecast, + this.fieldsForModels ); } @@ -894,7 +903,8 @@ public IndexMetadata withIncrementedPrimaryTerm(int shardId) { this.indexCompatibilityVersion, this.stats, this.writeLoadForecast, - this.shardSizeInBytesForecast + this.shardSizeInBytesForecast, + this.fieldsForModels ); } @@ -950,7 +960,8 @@ public IndexMetadata withTimestampRange(IndexLongFieldRange timestampRange) { this.indexCompatibilityVersion, this.stats, this.writeLoadForecast, - this.shardSizeInBytesForecast + this.shardSizeInBytesForecast, + this.fieldsForModels ); } @@ -1002,7 +1013,8 @@ public IndexMetadata withIncrementedVersion() { this.indexCompatibilityVersion, this.stats, this.writeLoadForecast, - this.shardSizeInBytesForecast + this.shardSizeInBytesForecast, + this.fieldsForModels ); } @@ -1206,6 +1218,10 @@ public OptionalLong getForecastedShardSizeInBytes() { return shardSizeInBytesForecast == null ? 
OptionalLong.empty() : OptionalLong.of(shardSizeInBytesForecast); } + public Map> getFieldsForModels() { + return fieldsForModels; + } + public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name"; public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY); @@ -1404,6 +1420,9 @@ public boolean equals(Object o) { if (rolloverInfos.equals(that.rolloverInfos) == false) { return false; } + if (fieldsForModels.equals(that.fieldsForModels) == false) { + return false; + } if (isSystem != that.isSystem) { return false; } @@ -1424,6 +1443,7 @@ public int hashCode() { result = 31 * result + Arrays.hashCode(primaryTerms); result = 31 * result + inSyncAllocationIds.hashCode(); result = 31 * result + rolloverInfos.hashCode(); + result = 31 * result + fieldsForModels.hashCode(); result = 31 * result + Boolean.hashCode(isSystem); return result; } @@ -1479,6 +1499,7 @@ private static class IndexMetadataDiff implements Diff { private final IndexMetadataStats stats; private final Double indexWriteLoadForecast; private final Long shardSizeInBytesForecast; + private final Diff>> fieldsForModels; IndexMetadataDiff(IndexMetadata before, IndexMetadata after) { index = after.index.getName(); @@ -1515,6 +1536,12 @@ private static class IndexMetadataDiff implements Diff { stats = after.stats; indexWriteLoadForecast = after.writeLoadForecast; shardSizeInBytesForecast = after.shardSizeInBytesForecast; + fieldsForModels = DiffableUtils.diff( + before.fieldsForModels, + after.fieldsForModels, + DiffableUtils.getStringKeySerializer(), + DiffableUtils.StringSetValueSerializer.getInstance() + ); } private static final DiffableUtils.DiffableValueReader ALIAS_METADATA_DIFF_VALUE_READER = @@ -1574,6 +1601,15 @@ private static class IndexMetadataDiff implements Diff { indexWriteLoadForecast = null; shardSizeInBytesForecast = null; } + if (in.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { + fieldsForModels = DiffableUtils.readJdkMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + DiffableUtils.StringSetValueSerializer.getInstance() + ); + } else { + fieldsForModels = DiffableUtils.emptyDiff(); + } } @Override @@ -1609,6 +1645,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(indexWriteLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); } + if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { + fieldsForModels.writeTo(out); + } } @Override @@ -1638,6 +1677,7 @@ public IndexMetadata apply(IndexMetadata part) { builder.stats(stats); builder.indexWriteLoadForecast(indexWriteLoadForecast); builder.shardSizeInBytesForecast(shardSizeInBytesForecast); + builder.fieldsForModels(fieldsForModels.apply(part.fieldsForModels)); return builder.build(true); } } @@ -1705,6 +1745,11 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function i.readCollectionAsImmutableSet(StreamInput::readString)) + ); + } return builder.build(true); } @@ -1751,6 +1796,9 @@ public void writeTo(StreamOutput out, boolean mappingsAsHash) throws IOException out.writeOptionalDouble(writeLoadForecast); out.writeOptionalLong(shardSizeInBytesForecast); } + if (out.getTransportVersion().onOrAfter(TransportVersions.SEMANTIC_TEXT_FIELD_ADDED)) { + out.writeMap(fieldsForModels, StreamOutput::writeStringCollection); + } } @Override @@ -1800,6 +1848,7 @@ public static class Builder 
{ private IndexMetadataStats stats = null; private Double indexWriteLoadForecast = null; private Long shardSizeInBytesForecast = null; + private final ImmutableOpenMap.Builder> fieldsForModels; public Builder(String index) { this.index = index; @@ -1807,6 +1856,7 @@ public Builder(String index) { this.customMetadata = ImmutableOpenMap.builder(); this.inSyncAllocationIds = new HashMap<>(); this.rolloverInfos = ImmutableOpenMap.builder(); + this.fieldsForModels = ImmutableOpenMap.builder(); this.isSystem = false; } @@ -1831,6 +1881,7 @@ public Builder(IndexMetadata indexMetadata) { this.stats = indexMetadata.stats; this.indexWriteLoadForecast = indexMetadata.writeLoadForecast; this.shardSizeInBytesForecast = indexMetadata.shardSizeInBytesForecast; + this.fieldsForModels = ImmutableOpenMap.builder(indexMetadata.fieldsForModels); } public Builder index(String index) { @@ -2060,6 +2111,11 @@ public Builder shardSizeInBytesForecast(Long shardSizeInBytesForecast) { return this; } + public Builder fieldsForModels(Map> fieldsForModels) { + processFieldsForModels(this.fieldsForModels, fieldsForModels); + return this; + } + public IndexMetadata build() { return build(false); } @@ -2254,7 +2310,8 @@ IndexMetadata build(boolean repair) { SETTING_INDEX_VERSION_COMPATIBILITY.get(settings), stats, indexWriteLoadForecast, - shardSizeInBytesForecast + shardSizeInBytesForecast, + fieldsForModels.build() ); } @@ -2380,6 +2437,10 @@ public static void toXContent(IndexMetadata indexMetadata, XContentBuilder build builder.field(KEY_SHARD_SIZE_FORECAST, indexMetadata.shardSizeInBytesForecast); } + if (indexMetadata.fieldsForModels.isEmpty() == false) { + builder.field(KEY_FIELDS_FOR_MODELS, indexMetadata.fieldsForModels); + } + builder.endObject(); } @@ -2457,6 +2518,19 @@ public static IndexMetadata fromXContent(XContentParser parser, Map> fieldsForModels = parser.map(HashMap::new, XContentParser::list) + .entrySet() + .stream() + .collect( + Collectors.toMap( + Map.Entry::getKey, + v -> v.getValue().stream().map(Object::toString).collect(Collectors.toUnmodifiableSet()) + ) + ); + builder.fieldsForModels(fieldsForModels); + break; default: // assume it's custom index metadata builder.putCustom(currentFieldName, parser.mapStrings()); @@ -2653,6 +2727,17 @@ private static void handleLegacyMapping(Builder builder, Map map builder.putMapping(new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, mapping)); } } + + private static void processFieldsForModels( + ImmutableOpenMap.Builder> builder, + Map> fieldsForModels + ) { + builder.clear(); + if (fieldsForModels != null) { + // Ensure that all field sets contained in the processed map are immutable + fieldsForModels.forEach((k, v) -> builder.put(k, Set.copyOf(v))); + } + } } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index da24f0b9d0dc5..d8fe0b0c19e52 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -1267,6 +1267,8 @@ static IndexMetadata buildIndexMetadata( if (mapper != null) { MappingMetadata mappingMd = new MappingMetadata(mapper); mappingsMetadata.put(mapper.type(), mappingMd); + + indexMetadataBuilder.fieldsForModels(mapper.mappers().getFieldsForModels()); } for (MappingMetadata mappingMd : mappingsMetadata.values()) { diff --git 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java index 7a2d20d042f84..8d12ebd36c645 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataMappingService.java @@ -199,6 +199,7 @@ private static ClusterState applyRequest( DocumentMapper mapper = mapperService.documentMapper(); if (mapper != null) { indexMetadataBuilder.putMapping(new MappingMetadata(mapper)); + indexMetadataBuilder.fieldsForModels(mapper.mappers().getFieldsForModels()); } if (updatedMapping) { indexMetadataBuilder.mappingVersion(1 + indexMetadataBuilder.mappingVersion()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java index 2b4eec2bdd565..564e6f903a2ae 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldTypeLookup.java @@ -36,6 +36,11 @@ final class FieldTypeLookup { */ private final Map> fieldToCopiedFields; + /** + * A map from inference model ID to all fields that use the model to generate embeddings. + */ + private final Map> fieldsForModels; + private final int maxParentPathDots; FieldTypeLookup( @@ -48,6 +53,7 @@ final class FieldTypeLookup { final Map fullSubfieldNameToParentPath = new HashMap<>(); final Map dynamicFieldTypes = new HashMap<>(); final Map> fieldToCopiedFields = new HashMap<>(); + final Map> fieldsForModels = new HashMap<>(); for (FieldMapper fieldMapper : fieldMappers) { String fieldName = fieldMapper.name(); MappedFieldType fieldType = fieldMapper.fieldType(); @@ -65,6 +71,13 @@ final class FieldTypeLookup { } fieldToCopiedFields.get(targetField).add(fieldName); } + if (fieldType instanceof InferenceModelFieldType inferenceModelFieldType) { + String inferenceModel = inferenceModelFieldType.getInferenceModel(); + if (inferenceModel != null) { + Set fields = fieldsForModels.computeIfAbsent(inferenceModel, v -> new HashSet<>()); + fields.add(fieldName); + } + } } int maxParentPathDots = 0; @@ -97,6 +110,8 @@ final class FieldTypeLookup { // make values into more compact immutable sets to save memory fieldToCopiedFields.entrySet().forEach(e -> e.setValue(Set.copyOf(e.getValue()))); this.fieldToCopiedFields = Map.copyOf(fieldToCopiedFields); + fieldsForModels.entrySet().forEach(e -> e.setValue(Set.copyOf(e.getValue()))); + this.fieldsForModels = Map.copyOf(fieldsForModels); } public static int dotCount(String path) { @@ -205,6 +220,10 @@ Set sourcePaths(String field) { return fieldToCopiedFields.containsKey(resolvedField) ? fieldToCopiedFields.get(resolvedField) : Set.of(resolvedField); } + Map> getFieldsForModels() { + return fieldsForModels; + } + /** * If field is a leaf multi-field return the path to the parent field. Otherwise, return null. 
*/ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 4880ce5edc204..2c16a0fda9e60 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -498,4 +498,8 @@ public void validateDoesNotShadow(String name) { throw new MapperParsingException("Field [" + name + "] attempted to shadow a time_series_metric"); } } + + public Map> getFieldsForModels() { + return fieldTypeLookup.getFieldsForModels(); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index b4c9f670f66b6..58b8adcf53538 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -82,6 +83,8 @@ public void testIndexMetadataSerialization() throws IOException { IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; + Map> fieldsForModels = randomFieldsForModels(true); + IndexMetadata metadata = IndexMetadata.builder("foo") .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) .creationDate(randomLong()) @@ -105,6 +108,7 @@ public void testIndexMetadataSerialization() throws IOException { .stats(indexStats) .indexWriteLoadForecast(indexWriteLoadForecast) .shardSizeInBytesForecast(shardSizeInBytesForecast) + .fieldsForModels(fieldsForModels) .build(); assertEquals(system, metadata.isSystem()); @@ -138,6 +142,7 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getStats(), fromXContentMeta.getStats()); assertEquals(metadata.getForecastedWriteLoad(), fromXContentMeta.getForecastedWriteLoad()); assertEquals(metadata.getForecastedShardSizeInBytes(), fromXContentMeta.getForecastedShardSizeInBytes()); + assertEquals(metadata.getFieldsForModels(), fromXContentMeta.getFieldsForModels()); final BytesStreamOutput out = new BytesStreamOutput(); metadata.writeTo(out); @@ -159,8 +164,9 @@ public void testIndexMetadataSerialization() throws IOException { assertEquals(metadata.getCustomData(), deserialized.getCustomData()); assertEquals(metadata.isSystem(), deserialized.isSystem()); assertEquals(metadata.getStats(), deserialized.getStats()); - assertEquals(metadata.getForecastedWriteLoad(), fromXContentMeta.getForecastedWriteLoad()); - assertEquals(metadata.getForecastedShardSizeInBytes(), fromXContentMeta.getForecastedShardSizeInBytes()); + assertEquals(metadata.getForecastedWriteLoad(), deserialized.getForecastedWriteLoad()); + assertEquals(metadata.getForecastedShardSizeInBytes(), deserialized.getForecastedShardSizeInBytes()); + assertEquals(metadata.getFieldsForModels(), deserialized.getFieldsForModels()); } } @@ -544,10 +550,37 @@ public void testPartialIndexReceivesDataFrozenTierPreference() { } } + public void testFieldsForModels() { + Settings.Builder settings = indexSettings(IndexVersion.current(), 
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java
index b4c9f670f66b6..58b8adcf53538 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java
@@ -40,6 +40,7 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -82,6 +83,8 @@ public void testIndexMetadataSerialization() throws IOException {
         IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null;
         Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null;
         Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null;
+        Map<String, Set<String>> fieldsForModels = randomFieldsForModels(true);
+
         IndexMetadata metadata = IndexMetadata.builder("foo")
             .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1))
             .creationDate(randomLong())
@@ -105,6 +108,7 @@ public void testIndexMetadataSerialization() throws IOException {
             .stats(indexStats)
             .indexWriteLoadForecast(indexWriteLoadForecast)
             .shardSizeInBytesForecast(shardSizeInBytesForecast)
+            .fieldsForModels(fieldsForModels)
             .build();
 
         assertEquals(system, metadata.isSystem());
@@ -138,6 +142,7 @@ public void testIndexMetadataSerialization() throws IOException {
         assertEquals(metadata.getStats(), fromXContentMeta.getStats());
         assertEquals(metadata.getForecastedWriteLoad(), fromXContentMeta.getForecastedWriteLoad());
         assertEquals(metadata.getForecastedShardSizeInBytes(), fromXContentMeta.getForecastedShardSizeInBytes());
+        assertEquals(metadata.getFieldsForModels(), fromXContentMeta.getFieldsForModels());
 
         final BytesStreamOutput out = new BytesStreamOutput();
         metadata.writeTo(out);
@@ -159,8 +164,9 @@ public void testIndexMetadataSerialization() throws IOException {
         assertEquals(metadata.getCustomData(), deserialized.getCustomData());
         assertEquals(metadata.isSystem(), deserialized.isSystem());
         assertEquals(metadata.getStats(), deserialized.getStats());
-        assertEquals(metadata.getForecastedWriteLoad(), fromXContentMeta.getForecastedWriteLoad());
-        assertEquals(metadata.getForecastedShardSizeInBytes(), fromXContentMeta.getForecastedShardSizeInBytes());
+        assertEquals(metadata.getForecastedWriteLoad(), deserialized.getForecastedWriteLoad());
+        assertEquals(metadata.getForecastedShardSizeInBytes(), deserialized.getForecastedShardSizeInBytes());
+        assertEquals(metadata.getFieldsForModels(), deserialized.getFieldsForModels());
         }
     }
 
@@ -544,10 +550,37 @@ public void testPartialIndexReceivesDataFrozenTierPreference() {
         }
     }
 
+    public void testFieldsForModels() {
+        Settings.Builder settings = indexSettings(IndexVersion.current(), randomIntBetween(1, 8), 0);
+        IndexMetadata idxMeta1 = IndexMetadata.builder("test").settings(settings).build();
+        assertThat(idxMeta1.getFieldsForModels(), equalTo(Map.of()));
+
+        Map<String, Set<String>> fieldsForModels = randomFieldsForModels(false);
+        IndexMetadata idxMeta2 = IndexMetadata.builder(idxMeta1).fieldsForModels(fieldsForModels).build();
+        assertThat(idxMeta2.getFieldsForModels(), equalTo(fieldsForModels));
+    }
+
     private static Settings indexSettingsWithDataTier(String dataTier) {
         return indexSettings(IndexVersion.current(), 1, 0).put(DataTier.TIER_PREFERENCE, dataTier).build();
     }
 
+    private static Map<String, Set<String>> randomFieldsForModels(boolean allowNull) {
+        if (allowNull && randomBoolean()) {
+            return null;
+        }
+
+        Map<String, Set<String>> fieldsForModels = new HashMap<>();
+        for (int i = 0; i < randomIntBetween(0, 5); i++) {
+            Set<String> fields = new HashSet<>();
+            for (int j = 0; j < randomIntBetween(1, 4); j++) {
+                fields.add(randomAlphaOfLengthBetween(4, 10));
+            }
+            fieldsForModels.put(randomAlphaOfLengthBetween(4, 10), fields);
+        }
+
+        return fieldsForModels;
+    }
+
     private IndexMetadataStats randomIndexStats(int numberOfShards) {
         IndexWriteLoad.Builder indexWriteLoadBuilder = IndexWriteLoad.builder(numberOfShards);
         int numberOfPopulatedWriteLoads = randomIntBetween(0, numberOfShards);
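Note on IndexMetadata#getFieldsForModels as exercised above: the map is keyed by model ID, which answers "which fields does model X feed"; a consumer that needs the per-field view can invert it. A minimal sketch with invented names (the inversion is an illustration, not an API in the patch):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    class InvertFieldsForModels {
        public static void main(String[] args) {
            // shape matches IndexMetadata#getFieldsForModels(): model ID -> field names
            Map<String, Set<String>> fieldsForModels = Map.of(
                "model-a", Set.of("title", "body"),
                "model-b", Set.of("summary")
            );

            // reverse view: field name -> model ID; safe because getInferenceModel()
            // yields at most one model per field, so no key collides here
            Map<String, String> modelForField = new HashMap<>();
            fieldsForModels.forEach((model, fields) -> fields.forEach(field -> modelForField.put(field, model)));

            System.out.println(modelForField); // {title=model-a, body=model-a, summary=model-b} (order may vary)
        }
    }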
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
index 3f50b9fdf6621..27663edde945c 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java
@@ -16,6 +16,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import static java.util.Collections.emptyList;
@@ -35,6 +36,10 @@ public void testEmpty() {
         Collection<String> names = lookup.getMatchingFieldNames("foo");
         assertNotNull(names);
         assertThat(names, hasSize(0));
+
+        Map<String, Set<String>> fieldsForModels = lookup.getFieldsForModels();
+        assertNotNull(fieldsForModels);
+        assertTrue(fieldsForModels.isEmpty());
     }
 
     public void testAddNewField() {
@@ -42,6 +47,10 @@ public void testAddNewField() {
         FieldTypeLookup lookup = new FieldTypeLookup(Collections.singletonList(f), emptyList(), Collections.emptyList());
         assertNull(lookup.get("bar"));
         assertEquals(f.fieldType(), lookup.get("foo"));
+
+        Map<String, Set<String>> fieldsForModels = lookup.getFieldsForModels();
+        assertNotNull(fieldsForModels);
+        assertTrue(fieldsForModels.isEmpty());
     }
 
     public void testAddFieldAlias() {
@@ -421,6 +430,23 @@ public void testRuntimeFieldNameOutsideContext() {
         }
     }
 
+    public void testInferenceModelFieldType() {
+        MockFieldMapper f1 = new MockFieldMapper(new MockInferenceModelFieldType("foo1", "bar1"));
+        MockFieldMapper f2 = new MockFieldMapper(new MockInferenceModelFieldType("foo2", "bar1"));
+        MockFieldMapper f3 = new MockFieldMapper(new MockInferenceModelFieldType("foo3", "bar2"));
+
+        FieldTypeLookup lookup = new FieldTypeLookup(List.of(f1, f2, f3), emptyList(), emptyList());
+        assertEquals(f1.fieldType(), lookup.get("foo1"));
+        assertEquals(f2.fieldType(), lookup.get("foo2"));
+        assertEquals(f3.fieldType(), lookup.get("foo3"));
+
+        Map<String, Set<String>> fieldsForModels = lookup.getFieldsForModels();
+        assertNotNull(fieldsForModels);
+        assertEquals(2, fieldsForModels.size());
+        assertEquals(Set.of("foo1", "foo2"), fieldsForModels.get("bar1"));
+        assertEquals(Set.of("foo3"), fieldsForModels.get("bar2"));
+    }
+
     private static FlattenedFieldMapper createFlattenedMapper(String fieldName) {
         return new FlattenedFieldMapper.Builder(fieldName).build(MapperBuilderContext.root(false, false));
     }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
index 0308dac5fa216..f512f5d352a43 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingLookupTests.java
@@ -26,6 +26,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 import static java.util.Collections.emptyList;
@@ -121,6 +122,8 @@ public void testEmptyMappingLookup() {
         assertEquals(0, mappingLookup.getMapping().getMetadataMappersMap().size());
         assertFalse(mappingLookup.fieldMappers().iterator().hasNext());
         assertEquals(0, mappingLookup.getMatchingFieldNames("*").size());
+        assertNotNull(mappingLookup.getFieldsForModels());
+        assertTrue(mappingLookup.getFieldsForModels().isEmpty());
     }
 
     public void testValidateDoesNotShadow() {
@@ -188,6 +191,22 @@ public MetricType getMetricType() {
         );
     }
 
+    public void testFieldsForModels() {
+        MockInferenceModelFieldType fieldType = new MockInferenceModelFieldType("test_field_name", "test_model_id");
+        MappingLookup mappingLookup = createMappingLookup(
+            Collections.singletonList(new MockFieldMapper(fieldType)),
+            emptyList(),
+            emptyList()
+        );
+        assertEquals(1, size(mappingLookup.fieldMappers()));
+        assertEquals(fieldType, mappingLookup.getFieldType("test_field_name"));
+
+        Map<String, Set<String>> fieldsForModels = mappingLookup.getFieldsForModels();
+        assertNotNull(fieldsForModels);
+        assertEquals(1, fieldsForModels.size());
+        assertEquals(Collections.singleton("test_field_name"), fieldsForModels.get("test_model_id"));
+    }
+
     private void assertAnalyzes(Analyzer analyzer, String field, String output) throws IOException {
         try (TokenStream tok = analyzer.tokenStream(field, new StringReader(""))) {
             CharTermAttribute term = tok.addAttribute(CharTermAttribute.class);
diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
index d0b30bff92f3e..99fb21d652d93 100644
--- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java
@@ -539,7 +539,7 @@ public static MetadataRolloverService getMetadataRolloverService(
         AllocationService allocationService = mock(AllocationService.class);
         when(allocationService.reroute(any(ClusterState.class), any(String.class), any())).then(i -> i.getArguments()[0]);
         when(allocationService.getShardRoutingRoleStrategy()).thenReturn(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY);
-        MappingLookup mappingLookup = null;
+        MappingLookup mappingLookup = MappingLookup.EMPTY;
         if (dataStream != null) {
             RootObjectMapper.Builder root = new RootObjectMapper.Builder("_doc", ObjectMapper.Defaults.SUBOBJECTS);
             root.add(
@@ -616,6 +616,7 @@ public static IndicesService mockIndicesServices(MappingLookup mappingLookup) throws Exception {
         DocumentMapper documentMapper = mock(DocumentMapper.class);
         when(documentMapper.mapping()).thenReturn(mapping);
         when(documentMapper.mappingSource()).thenReturn(mapping.toCompressedXContent());
+        when(documentMapper.mappers()).thenReturn(mappingLookup);
         RoutingFieldMapper routingFieldMapper = mock(RoutingFieldMapper.class);
         when(routingFieldMapper.required()).thenReturn(false);
         when(documentMapper.routingFieldMapper()).thenReturn(routingFieldMapper);
diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java
new file mode 100644
index 0000000000000..854749d6308db
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MockInferenceModelFieldType.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.mapper;
+
+import org.apache.lucene.search.Query;
+import org.elasticsearch.index.query.SearchExecutionContext;
+
+import java.util.Map;
+
+public class MockInferenceModelFieldType extends SimpleMappedFieldType implements InferenceModelFieldType {
+    private static final String TYPE_NAME = "mock_inference_model_field_type";
+
+    private final String modelId;
+
+    public MockInferenceModelFieldType(String name, String modelId) {
+        super(name, false, false, false, TextSearchInfo.NONE, Map.of());
+        this.modelId = modelId;
+    }
+
+    @Override
+    public String typeName() {
+        return TYPE_NAME;
+    }
+
+    @Override
+    public Query termQuery(Object value, SearchExecutionContext context) {
+        throw new IllegalArgumentException("termQuery not implemented");
+    }
+
+    @Override
+    public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
+        return SourceValueFetcher.toString(name(), context, format);
+    }
+
+    @Override
+    public String getInferenceModel() {
+        return modelId;
+    }
+}
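Note ahead of SemanticTextClusterMetadataTests below: the test constructs its executor with Java's qualified inner-class creation syntax, mappingService.new PutMappingExecutor(), which reads oddly on first sight. A self-contained illustration of that language feature (Outer, Inner, and the tag field are throwaway names, not Elasticsearch types):

    class Outer {
        private final String tag;

        Outer(String tag) {
            this.tag = tag;
        }

        class Inner {
            // Inner is a non-static member class, so each instance captures an enclosing Outer
            String describe() {
                return "inner of " + tag;
            }
        }
    }

    class QualifiedNewDemo {
        public static void main(String[] args) {
            Outer outer = new Outer("mappingService");
            // same shape as `mappingService.new PutMappingExecutor()` in the test below
            Outer.Inner inner = outer.new Inner();
            System.out.println(inner.describe()); // prints: inner of mappingService
        }
    }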
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java
new file mode 100644
index 0000000000000..47cae14003c70
--- /dev/null
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/cluster/metadata/SemanticTextClusterMetadataTests.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.cluster.metadata;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.xpack.ml.MlSingleNodeTestCase;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class SemanticTextClusterMetadataTests extends MlSingleNodeTestCase {
+    public void testCreateIndexWithSemanticTextField() {
+        final IndexService indexService = createIndex(
+            "test",
+            client().admin().indices().prepareCreate("test").setMapping("field", "type=semantic_text,model_id=test_model")
+        );
+        assertEquals(Map.of("test_model", Set.of("field")), indexService.getMetadata().getFieldsForModels());
+    }
+
+    public void testAddSemanticTextField() throws Exception {
+        final IndexService indexService = createIndex("test", client().admin().indices().prepareCreate("test"));
+        final MetadataMappingService mappingService = getInstanceFromNode(MetadataMappingService.class);
+        final MetadataMappingService.PutMappingExecutor putMappingExecutor = mappingService.new PutMappingExecutor();
+        final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
+
+        final PutMappingClusterStateUpdateRequest request = new PutMappingClusterStateUpdateRequest("""
+            { "properties": { "field": { "type": "semantic_text", "model_id": "test_model" }}}""");
+        request.indices(new Index[] { indexService.index() });
+        final var resultingState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful(
+            clusterService.state(),
+            putMappingExecutor,
+            singleTask(request)
+        );
+        assertEquals(Map.of("test_model", Set.of("field")), resultingState.metadata().index("test").getFieldsForModels());
+    }
+
+    private static List<MetadataMappingService.PutMappingClusterStateUpdateTask> singleTask(PutMappingClusterStateUpdateRequest request) {
+        return Collections.singletonList(new MetadataMappingService.PutMappingClusterStateUpdateTask(request, ActionListener.running(() -> {
+            throw new AssertionError("task should not complete publication");
+        })));
+    }
+}
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java
index 2d7832d747de4..5af3fd527e31e 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/LocalStateMachineLearning.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.TokenizerFactory;
+import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.license.LicenseService;
 import org.elasticsearch.license.XPackLicenseState;
@@ -102,6 +103,11 @@ public Map<String, AnalysisModule.AnalysisProvider<TokenizerFactory>> getTokenizers() {
         return mlPlugin.getTokenizers();
     }
 
+    @Override
+    public Map<String, Mapper.TypeParser> getMappers() {
+        return mlPlugin.getMappers();
+    }
+
     /**
      * This is only required as we now have to have the GetRollupIndexCapsAction as a valid action in our node.
      * The MachineLearningLicenseTests attempt to create a datafeed referencing this LocalStateMachineLearning object.