From 9a876704f896fb4635e00725cc3a7bc62364827e Mon Sep 17 00:00:00 2001
From: Alan Woodward
Date: Tue, 19 Sep 2023 14:38:41 +0100
Subject: [PATCH 01/27] Store dynamic mapping updates as builders (#90674)

Currently, newly created dynamic mappers are stored on the parser context
as instantiated mappers. This means that when we build the mapping update
from the collected dynamic mappers, we need to convert them back into
builders again, and immediately re-build them. This commit changes the
context to instead store just the builders, which results in a small
simplification to the dynamic update build logic and removes another
method on ObjectMapper.Builder.
---
 .../index/mapper/DocumentParser.java          | 32 ++++++++------
 .../index/mapper/DocumentParserContext.java   | 42 +++++++++----------
 .../index/mapper/DynamicFieldsBuilder.java    | 26 +++---------
 .../index/mapper/NumberFieldMapper.java       |  2 +-
 .../index/mapper/ObjectMapper.java            | 39 +++++++++--------
 .../vectors/DenseVectorFieldMapper.java       | 27 ++----------
 .../mapper/DynamicFieldsBuilderTests.java     |  8 ++--
 .../index/mapper/ObjectMapperTests.java       | 31 +++++++-------
 .../mapper/ConstantKeywordFieldMapper.java    |  7 ++--
 9 files changed, 94 insertions(+), 120 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
index d98bb8b367694..03653322c383c 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java
@@ -250,8 +250,8 @@ static Mapping createDynamicUpdate(DocumentParserContext context) {
             return null;
         }
         RootObjectMapper.Builder rootBuilder = context.updateRoot();
-        context.getDynamicMappers().forEach(mapper -> rootBuilder.addDynamic(mapper.name(), null, mapper, context));
-
+        context.getDynamicMappers()
+            .forEach((name, builders) -> builders.forEach(builder -> rootBuilder.addDynamic(name, null, builder, context)));
         for (RuntimeField runtimeField : context.getDynamicRuntimeFields()) {
             rootBuilder.addRuntimeField(runtimeField);
         }
@@ -484,13 +484,20 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur
             // not dynamic, read everything up to end object
             context.parser().skipChildren();
         } else {
+            Mapper.Builder dynamicObjectBuilder = null;
             Mapper dynamicObjectMapper;
             if (context.dynamic() == ObjectMapper.Dynamic.RUNTIME) {
                 // with dynamic:runtime all leaf fields will be runtime fields unless explicitly mapped,
                 // hence we don't dynamically create empty objects under properties, but rather carry around an artificial object mapper
                 dynamicObjectMapper = new NoOpObjectMapper(currentFieldName, context.path().pathAsText(currentFieldName));
             } else {
-                dynamicObjectMapper = DynamicFieldsBuilder.createDynamicObjectMapper(context, currentFieldName);
+                dynamicObjectBuilder = DynamicFieldsBuilder.findTemplateBuilderForObject(context, currentFieldName);
+                if (dynamicObjectBuilder == null) {
+                    dynamicObjectBuilder = new ObjectMapper.Builder(currentFieldName, ObjectMapper.Defaults.SUBOBJECTS).enabled(
+                        ObjectMapper.Defaults.ENABLED
+                    );
+                }
+                dynamicObjectMapper = dynamicObjectBuilder.build(context.createDynamicMapperBuilderContext());
             }
             if (context.parent().subobjects() == false) {
                 if (dynamicObjectMapper instanceof NestedObjectMapper) {
@@ -512,8 +519,8 @@ private static void parseObjectDynamic(DocumentParserContext context, String cur
                 }
             }
-            if (context.dynamic() != ObjectMapper.Dynamic.RUNTIME) {
-                context.addDynamicMapper(dynamicObjectMapper);
+            if (context.dynamic() != ObjectMapper.Dynamic.RUNTIME && dynamicObjectBuilder != null) {
+                context.addDynamicMapper(dynamicObjectMapper.name(), dynamicObjectBuilder);
             }
             if (dynamicObjectMapper instanceof NestedObjectMapper && context.isWithinCopyTo()) {
                 throwOnCreateDynamicNestedViaCopyTo(dynamicObjectMapper, context);
@@ -554,12 +561,13 @@ private static void parseArray(DocumentParserContext context, String lastFieldNa
         } else if (context.dynamic() == ObjectMapper.Dynamic.FALSE) {
             context.parser().skipChildren();
         } else {
-            Mapper objectMapperFromTemplate = DynamicFieldsBuilder.createObjectMapperFromTemplate(context, lastFieldName);
-            if (objectMapperFromTemplate == null) {
+            Mapper.Builder objectBuilderFromTemplate = DynamicFieldsBuilder.findTemplateBuilderForObject(context, lastFieldName);
+            if (objectBuilderFromTemplate == null) {
                 parseNonDynamicArray(context, lastFieldName, lastFieldName);
             } else {
+                Mapper objectMapperFromTemplate = objectBuilderFromTemplate.build(context.createDynamicMapperBuilderContext());
                 if (parsesArrayValue(objectMapperFromTemplate)) {
-                    context.addDynamicMapper(objectMapperFromTemplate);
+                    context.addDynamicMapper(objectMapperFromTemplate.name(), objectBuilderFromTemplate);
                     context.path().add(lastFieldName);
                     parseObjectOrField(context, objectMapperFromTemplate);
                     context.path().remove();
@@ -603,13 +611,14 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context
         if (context.indexSettings().getIndexVersionCreated().onOrAfter(DYNAMICALLY_MAP_DENSE_VECTORS_INDEX_VERSION)) {
             final MapperBuilderContext builderContext = context.createDynamicMapperBuilderContext();
             final String fullFieldName = builderContext.buildFullName(fieldName);
-            final List<Mapper> mappers = context.getDynamicMappers(fullFieldName);
+            final List<Mapper.Builder> mappers = context.getDynamicMappers(fullFieldName);
             if (mappers == null
                 || context.isFieldAppliedFromTemplate(fullFieldName)
                 || context.isCopyToField(fullFieldName)
                 || mappers.size() < MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING
                 || mappers.size() > MAX_DIMS_COUNT
-                || mappers.stream().allMatch(m -> m instanceof NumberFieldMapper && "float".equals(m.typeName())) == false) {
+                || mappers.stream()
+                    .allMatch(m -> m instanceof NumberFieldMapper.Builder nb && nb.type == NumberFieldMapper.NumberType.FLOAT) == false) {
                 return;
             }
@@ -617,8 +626,7 @@ private static void postProcessDynamicArrayMapping(DocumentParserContext context
                 fieldName,
                 context.indexSettings().getIndexVersionCreated()
             );
-            DenseVectorFieldMapper denseVectorFieldMapper = builder.build(builderContext);
-            context.updateDynamicMappers(fullFieldName, List.of(denseVectorFieldMapper));
+            context.updateDynamicMappers(fullFieldName, builder);
         }
     }

diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
index 700f0e492af73..f47b392115f81 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java
@@ -84,9 +84,9 @@ protected void addDoc(LuceneDocument doc) {
     private final MappingParserContext mappingParserContext;
     private final SourceToParse sourceToParse;
     private final Set<String> ignoredFields;
-    private final Map<String, List<Mapper>> dynamicMappers;
+    private final Map<String, List<Mapper.Builder>> dynamicMappers;
     private final Set<String> newFieldsSeen;
-    private final Map<String, ObjectMapper> dynamicObjectMappers;
+    private final Map<String, ObjectMapper.Builder> dynamicObjectMappers;
     private final List<RuntimeField> dynamicRuntimeFields;
    private
final DocumentDimensions dimensions; private final ObjectMapper parent; @@ -102,9 +102,9 @@ private DocumentParserContext( MappingParserContext mappingParserContext, SourceToParse sourceToParse, Set ignoreFields, - Map> dynamicMappers, + Map> dynamicMappers, Set newFieldsSeen, - Map dynamicObjectMappers, + Map dynamicObjectMappers, List dynamicRuntimeFields, String id, Field version, @@ -304,29 +304,29 @@ public boolean isCopyToField(String name) { /** * Add a new mapper dynamically created while parsing. */ - public final void addDynamicMapper(Mapper mapper) { + public final void addDynamicMapper(String fullName, Mapper.Builder builder) { // eagerly check object depth limit here to avoid stack overflow errors - if (mapper instanceof ObjectMapper) { - MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), mapper.name()); + if (builder instanceof ObjectMapper.Builder) { + MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), fullName); } // eagerly check field name limit here to avoid OOM errors // only check fields that are not already mapped or tracked in order to avoid hitting field limit too early via double-counting // note that existing fields can also receive dynamic mapping updates (e.g. constant_keyword to fix the value) - if (mappingLookup.getMapper(mapper.name()) == null - && mappingLookup.objectMappers().containsKey(mapper.name()) == false - && newFieldsSeen.add(mapper.name())) { + if (mappingLookup.getMapper(fullName) == null + && mappingLookup.objectMappers().containsKey(fullName) == false + && newFieldsSeen.add(fullName)) { mappingLookup.checkFieldLimit(indexSettings().getMappingTotalFieldsLimit(), newFieldsSeen.size()); } - if (mapper instanceof ObjectMapper objectMapper) { - dynamicObjectMappers.put(objectMapper.name(), objectMapper); + if (builder instanceof ObjectMapper.Builder objectMapper) { + dynamicObjectMappers.put(fullName, objectMapper); // dynamic object mappers may have been obtained from applying a dynamic template, in which case their definition may contain // sub-fields as well as sub-objects that need to be added to the mappings - for (Mapper submapper : objectMapper.mappers.values()) { + for (Mapper.Builder submapper : objectMapper.subBuilders()) { // we could potentially skip the step of adding these to the dynamic mappers, because their parent is already added to // that list, and what is important is that all of the intermediate objects are added to the dynamic object mappers so that // they can be looked up once sub-fields need to be added to them. For simplicity, we treat these like any other object - addDynamicMapper(submapper); + addDynamicMapper(fullName + "." + submapper.name, submapper); } } @@ -336,7 +336,7 @@ public final void addDynamicMapper(Mapper mapper) { // dynamically mapped objects when the incoming document defines no sub-fields in them: // 1) by default, they would be empty containers in the mappings, is it then important to map them? // 2) they can be the result of applying a dynamic template which may define sub-fields or set dynamic, enabled or subobjects. - dynamicMappers.computeIfAbsent(mapper.name(), k -> new ArrayList<>()).add(mapper); + dynamicMappers.computeIfAbsent(fullName, k -> new ArrayList<>()).add(builder); } /** @@ -345,8 +345,8 @@ public final void addDynamicMapper(Mapper mapper) { * Consists of a all {@link Mapper}s that will need to be added to their respective parent {@link ObjectMapper}s in order * to become part of the resulting dynamic mapping update. 
*/ - public final List getDynamicMappers() { - return dynamicMappers.values().stream().flatMap(List::stream).toList(); + public final Map> getDynamicMappers() { + return dynamicMappers; } /** @@ -355,13 +355,13 @@ public final List getDynamicMappers() { * @param fieldName Full field name with dot-notation. * @return List of Mappers or null */ - public final List getDynamicMappers(String fieldName) { + public final List getDynamicMappers(String fieldName) { return dynamicMappers.get(fieldName); } - public void updateDynamicMappers(String name, List mappers) { + public void updateDynamicMappers(String name, Mapper.Builder mapper) { dynamicMappers.remove(name); - mappers.forEach(this::addDynamicMapper); + dynamicMappers.put(name, List.of(mapper)); } /** @@ -371,7 +371,7 @@ public void updateDynamicMappers(String name, List mappers) { * Holds a flat set of object mappers, meaning that an object field named foo.bar can be looked up directly with its * dotted name. */ - final ObjectMapper getDynamicObjectMapper(String name) { + final ObjectMapper.Builder getDynamicObjectMapper(String name) { return dynamicObjectMappers.get(name); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index f2d1b8058f115..620b972ee04bf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -155,25 +155,6 @@ void createDynamicFieldFromValue(final DocumentParserContext context, String nam } } - /** - * Returns a dynamically created object mapper, eventually based on a matching dynamic template. - */ - static Mapper createDynamicObjectMapper(DocumentParserContext context, String name) { - Mapper mapper = createObjectMapperFromTemplate(context, name); - return mapper != null - ? mapper - : new ObjectMapper.Builder(name, ObjectMapper.Defaults.SUBOBJECTS).enabled(ObjectMapper.Defaults.ENABLED) - .build(context.createDynamicMapperBuilderContext()); - } - - /** - * Returns a dynamically created object mapper, based exclusively on a matching dynamic template, null otherwise. - */ - static Mapper createObjectMapperFromTemplate(DocumentParserContext context, String name) { - Mapper.Builder templateBuilder = findTemplateBuilderForObject(context, name); - return templateBuilder == null ? null : templateBuilder.build(context.createDynamicMapperBuilderContext()); - } - /** * Creates a dynamic string field based on a matching dynamic template. * No field is created in case there is no matching dynamic template. @@ -253,7 +234,10 @@ private static boolean applyMatchingTemplate( return true; } - private static Mapper.Builder findTemplateBuilderForObject(DocumentParserContext context, String name) { + /** + * Returns a dynamically created object builder, based exclusively on a matching dynamic template, null otherwise. 
+ */ + static Mapper.Builder findTemplateBuilderForObject(DocumentParserContext context, String name) { DynamicTemplate.XContentFieldType matchType = DynamicTemplate.XContentFieldType.OBJECT; DynamicTemplate dynamicTemplate = context.findDynamicTemplate(name, matchType); if (dynamicTemplate == null) { @@ -309,7 +293,7 @@ private static final class Concrete implements Strategy { void createDynamicField(Mapper.Builder builder, DocumentParserContext context) throws IOException { Mapper mapper = builder.build(context.createDynamicMapperBuilderContext()); - context.addDynamicMapper(mapper); + context.addDynamicMapper(mapper.name(), builder); parseField.accept(context, mapper); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 34763eda69a28..32683fd6469bf 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -118,7 +118,7 @@ public static class Builder extends FieldMapper.Builder { private final Parameter> meta = Parameter.metaParam(); private final ScriptCompiler scriptCompiler; - private final NumberType type; + public final NumberType type; private boolean allowMultipleValues = true; private final IndexVersion indexCreatedVersion; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java index c62f010b35af2..851892c3a05e9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/ObjectMapper.java @@ -25,10 +25,12 @@ import java.util.Collection; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.stream.Stream; public class ObjectMapper extends Mapper implements Cloneable { @@ -77,6 +79,7 @@ public static class Builder extends Mapper.Builder { protected Explicit enabled = Explicit.IMPLICIT_TRUE; protected Dynamic dynamic; protected final List mappersBuilders = new ArrayList<>(); + private final Set subMapperNames = new HashSet<>(); // keeps track of dynamically added subfields public Builder(String name, Explicit subobjects) { super(name); @@ -95,31 +98,27 @@ public Builder dynamic(Dynamic dynamic) { public Builder add(Mapper.Builder builder) { mappersBuilders.add(builder); + subMapperNames.add(builder.name); return this; } - private void add(String name, Mapper mapper) { - add(new Mapper.Builder(name) { - @Override - public Mapper build(MapperBuilderContext context) { - return mapper; - } - }); + public Collection subBuilders() { + return mappersBuilders; } /** - * Adds a dynamically created {@link Mapper} to this builder. + * Adds a dynamically created {@link Mapper.Builder} to this builder. 
* * @param name the name of the Mapper, including object prefixes * @param prefix the object prefix of this mapper * @param mapper the mapper to add * @param context the DocumentParserContext in which the mapper has been built */ - public final void addDynamic(String name, String prefix, Mapper mapper, DocumentParserContext context) { + public final void addDynamic(String name, String prefix, Mapper.Builder mapper, DocumentParserContext context) { // If the mapper to add has no dots, or the current object mapper has subobjects set to false, // we just add it as it is for sure a leaf mapper if (name.contains(".") == false || subobjects.value() == false) { - add(name, mapper); + add(mapper); } // otherwise we strip off the first object path of the mapper name, load or create // the relevant object mapper, and then recurse down into it, passing the remainder @@ -129,22 +128,28 @@ public final void addDynamic(String name, String prefix, Mapper mapper, Document int firstDotIndex = name.indexOf("."); String immediateChild = name.substring(0, firstDotIndex); String immediateChildFullName = prefix == null ? immediateChild : prefix + "." + immediateChild; - ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChildFullName, context); + ObjectMapper.Builder parentBuilder = findObjectBuilder(immediateChild, immediateChildFullName, context); parentBuilder.addDynamic(name.substring(firstDotIndex + 1), immediateChildFullName, mapper, context); - add(parentBuilder); } } - private static ObjectMapper.Builder findObjectBuilder(String fullName, DocumentParserContext context) { + private ObjectMapper.Builder findObjectBuilder(String leafName, String fullName, DocumentParserContext context) { // does the object mapper already exist? if so, use that ObjectMapper objectMapper = context.mappingLookup().objectMappers().get(fullName); if (objectMapper != null) { - return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); + ObjectMapper.Builder builder = objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); + add(builder); + return builder; } // has the object mapper been added as a dynamic update already? 
- objectMapper = context.getDynamicObjectMapper(fullName); - if (objectMapper != null) { - return objectMapper.newBuilder(context.indexSettings().getIndexVersionCreated()); + ObjectMapper.Builder builder = context.getDynamicObjectMapper(fullName); + if (builder != null) { + // we re-use builder instances so if the builder has already been + // added we don't need to do so again + if (subMapperNames.contains(leafName) == false) { + add(builder); + } + return builder; } throw new IllegalStateException("Missing intermediate object " + fullName); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index bd9b9df68aff2..365d4f615e30c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MappingLookup; @@ -979,29 +978,9 @@ public void parse(DocumentParserContext context) throws IOException { } if (fieldType().dims == null) { int dims = elementType.parseDimensionCount(context); - DenseVectorFieldType updatedDenseVectorFieldType = new DenseVectorFieldType( - fieldType().name(), - indexCreatedVersion, - elementType, - dims, - indexed, - similarity, - fieldType().meta() - ); - Mapper update = new DenseVectorFieldMapper( - simpleName(), - updatedDenseVectorFieldType, - elementType, - dims, - indexed, - similarity, - indexOptions, - indexCreatedVersion, - multiFields(), - copyTo - ); - context.addDynamicMapper(update); - + DenseVectorFieldMapper.Builder update = (DenseVectorFieldMapper.Builder) getMergeBuilder(); + update.dims.setValue(dims); + context.addDynamicMapper(name(), update); return; } Field field = fieldType().indexed ? 
parseKnnVector(context) : parseBinaryDocValuesVector(context); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java index 0e4945f7faea8..f9c332d21a876 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicFieldsBuilderTests.java @@ -16,6 +16,7 @@ import java.io.IOException; import java.util.List; +import java.util.Map; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -54,9 +55,10 @@ public XContentParser parser() { parser.nextToken(); assertTrue(parser.currentToken().isValue()); DynamicFieldsBuilder.DYNAMIC_TRUE.createDynamicFieldFromValue(ctx, fieldname); - List dynamicMappers = ctx.getDynamicMappers(); + Map> dynamicMappers = ctx.getDynamicMappers(); assertEquals(1, dynamicMappers.size()); - assertEquals(fieldname, dynamicMappers.get(0).name()); - assertEquals(expectedType, dynamicMappers.get(0).typeName()); + Mapper mapper = dynamicMappers.get(fieldname).get(0).build(MapperBuilderContext.root(false, false)); + assertEquals(fieldname, mapper.name()); + assertEquals(expectedType, mapper.typeName()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java index 3c77bf20b37d2..ec6a9ddd53e2c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperTests.java @@ -30,23 +30,20 @@ public class ObjectMapperTests extends MapperServiceTestCase { public void testDifferentInnerObjectTokenFailure() throws Exception { DocumentMapper defaultMapper = createDocumentMapper(mapping(b -> {})); - IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> defaultMapper.parse(new SourceToParse("1", new BytesArray(""" - { - "object": { - "array":[ - { - "object": { "value": "value" } - }, - { - "object":"value" - } - ] - }, - "value":"value" - }""".indent(1)), XContentType.JSON)) - ); + Exception e = expectThrows(IllegalArgumentException.class, () -> defaultMapper.parse(new SourceToParse("1", new BytesArray(""" + { + "object": { + "array":[ + { + "object": { "value": "value" } + }, + { + "object":"value" + } + ] + }, + "value":"value" + }""".indent(1)), XContentType.JSON))); assertThat(e.getMessage(), containsString("can't merge a non object mapping [object.array.object] with an object mapping")); } diff --git a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java index a530a8bf46623..e4d827409dfe1 100644 --- a/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-constant-keyword/src/main/java/org/elasticsearch/xpack/constantkeyword/mapper/ConstantKeywordFieldMapper.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import 
org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.SourceLoader; @@ -300,9 +299,9 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } if (fieldType().value == null) { - ConstantKeywordFieldType newFieldType = new ConstantKeywordFieldType(fieldType().name(), value, fieldType().meta()); - Mapper update = new ConstantKeywordFieldMapper(simpleName(), newFieldType); - context.addDynamicMapper(update); + Builder update = new Builder(simpleName()); + update.value.setValue(value); + context.addDynamicMapper(fieldType().name(), update); } else if (Objects.equals(fieldType().value, value) == false) { throw new IllegalArgumentException( "[constant_keyword] field [" From 1b1bf76cc42b89f8e83712a6f76ccbca6e62aa6b Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Sep 2023 15:14:36 +0100 Subject: [PATCH 02/27] Close expired search contexts on SEARCH thread (#99660) In a production cluster, I observed the `[scheduler]` thread stuck for a while trying to delete index files that became unreferenced while closing a search context. We shouldn't be doing I/O on the scheduler thread. This commit moves it to a `SEARCH` thread instead. --- docs/changelog/99660.yaml | 5 +++++ .../main/java/org/elasticsearch/search/SearchService.java | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/99660.yaml diff --git a/docs/changelog/99660.yaml b/docs/changelog/99660.yaml new file mode 100644 index 0000000000000..ea19e24d51fff --- /dev/null +++ b/docs/changelog/99660.yaml @@ -0,0 +1,5 @@ +pr: 99660 +summary: Close expired search contexts on SEARCH thread +area: Search +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 5ceeb53a8df6d..713fa4fa6c3e1 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -120,6 +120,7 @@ import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.Transports; import java.io.IOException; import java.util.ArrayList; @@ -340,7 +341,7 @@ public SearchService( SearchService::validateKeepAlives ); - this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, threadPool.executor(Names.SEARCH)); defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); @@ -1572,6 +1573,8 @@ void addResultsObject(SearchContext context) { class Reaper implements Runnable { @Override public void run() { + assert Transports.assertNotTransportThread("closing contexts may do IO, e.g. deleting dangling files") + && ThreadPool.assertNotScheduleThread("closing contexts may do IO, e.g. 
deleting dangling files");
            for (ReaderContext context : activeReaders.values()) {
                if (context.isExpired()) {
                    logger.debug("freeing search context [{}]", context.id());

From 7fc1f3a8c9cc975205f9e3452f1ba5c8b52fcdd9 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 19 Sep 2023 07:24:10 -0700
Subject: [PATCH 03/27] Add first transport version with new layout (#99640)

This commit adds the first transport version that uses the new patch
capable layout. The layout also supports independent serverless
transport versions.
---
 .../org/elasticsearch/TransportVersions.java | 28 ++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
index f1d5bc6f02a7c..16a99d38f6623 100644
--- a/server/src/main/java/org/elasticsearch/TransportVersions.java
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -14,11 +14,11 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;

 public class TransportVersions {

     /*
      * This map is used during class construction, referenced by the registerTransportVersion method.
      * When all the transport version constants have been registered, the map is cleared & never touched again.
      */
-    static Set<Integer> IDS = new HashSet<>();
+    static TreeSet<Integer> IDS = new TreeSet<>();

     static TransportVersion def(int id) {
         if (IDS == null) throw new IllegalStateException("The IDS map needs to be present to call this method");

         if (IDS.add(id) == false) {
             throw new IllegalArgumentException("Version id " + id + " defined twice");
         }
+        if (id < IDS.last()) {
+            throw new IllegalArgumentException("Version id " + id + " is not defined in the right location. Keep constants sorted");
+        }
         return new TransportVersion(id);
     }
@@ -137,6 +140,7 @@ static TransportVersion def(int id) {
     public static final TransportVersion COMPAT_VERSIONS_MAPPING_VERSION_ADDED = def(8_500_073);
     public static final TransportVersion V_8_500_074 = def(8_500_074);
     public static final TransportVersion NODE_INFO_INDEX_VERSION_ADDED = def(8_500_075);
+    public static final TransportVersion FIRST_NEW_ID_LAYOUT = def(8_501_00_0);
     /*
      * STOP! READ THIS FIRST! No, really,
      *        ____ _____ ___  ____  _   ____  _____ _    ____  _____ _   _ ___ ____  _____ ___ ____  ____  _____ _
      * A new transport version should be added EVERY TIME a change is made to the serialization protocol of one or more classes. Each
      * transport version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_8_1).
      *
-     * To add a new transport version, add a new constant at the bottom of the list, above this comment, which is one greater than the
-     * current highest version and ensure it has a fresh UUID. Don't add other lines, comments, etc.
+     * ADDING A TRANSPORT VERSION
+     * To add a new transport version, add a new constant at the bottom of the list, above this comment. Don't add other lines,
+     * comments, etc. The version id has the following layout:
+     *
+     *     M_NNN_SS_P
+     *
+     *     M - The major version of Elasticsearch
+     *     NNN - The server version part
+     *     SS - The serverless version part.
It should always be 00 here, it is used by serverless only. + * P - The patch version part + * + * To determine the id of the next TransportVersion constant, do the following: + * - Use the same major version, unless bumping majors + * - Bump the server version part by 1, unless creating a patch version + * - Leave the serverless part as 00 + * - Bump the patch part if creating a patch version + * + * If a patch version is created, it should be placed sorted among the other existing constants. * * REVERTING A TRANSPORT VERSION * From 6fc05a6bb13a18781cb935b37ad0a8611d2b1bac Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 19 Sep 2023 16:43:42 +0200 Subject: [PATCH 04/27] Remove redundant fields on TextFieldMapper and KeywordFieldMapper (#99666) These fields were also on the mapped field types, no need to waste bytes on them. --- .../AnnotatedTextFieldMapper.java | 2 +- .../index/mapper/KeywordFieldMapper.java | 6 +-- .../index/mapper/MapperService.java | 2 - .../index/mapper/TextFieldMapper.java | 41 +++++++++++-------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 22de0d03de15a..7153fcf4d46b3 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -480,7 +480,7 @@ private AnnotatedTextFieldType( boolean isSyntheticSource, Map meta ) { - super(name, true, store, tsi, isSyntheticSource, null, meta); + super(name, true, store, tsi, isSyntheticSource, null, meta, false, false); } public AnnotatedTextFieldType(String name, Map meta) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 8d9c77f503ab9..2dfd4cbfff0b5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -161,7 +161,9 @@ public static class Builder extends FieldMapper.Builder { private final Parameter indexOptions = TextParams.keywordIndexOptions(m -> toType(m).indexOptions); private final Parameter hasNorms = TextParams.norms(false, m -> toType(m).fieldType.omitNorms() == false); - private final Parameter similarity = TextParams.similarity(m -> toType(m).similarity); + private final Parameter similarity = TextParams.similarity( + m -> toType(m).fieldType().getTextSearchInfo().similarity() + ); private final Parameter normalizer; @@ -808,7 +810,6 @@ public void validateMatchedRoutingPath(final String routingPath) { private final boolean hasDocValues; private final String indexOptions; private final FieldType fieldType; - private final SimilarityProvider similarity; private final String normalizerName; private final boolean splitQueriesOnWhitespace; private final Script script; @@ -833,7 +834,6 @@ private KeywordFieldMapper( this.hasDocValues = builder.hasDocValues.getValue(); this.indexOptions = builder.indexOptions.getValue(); this.fieldType = freezeAndDeduplicateFieldType(fieldType); - this.similarity = builder.similarity.getValue(); this.normalizerName = builder.normalizer.getValue(); this.splitQueriesOnWhitespace = 
builder.splitQueriesOnWhitespace.getValue(); this.script = builder.script.get(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 9f9e45f53b837..13aaf1a93a4d2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -128,7 +128,6 @@ public enum MergeReason { private final IndexVersion indexVersionCreated; private final MapperRegistry mapperRegistry; private final Supplier mappingParserContextSupplier; - private final Supplier documentParsingObserverSupplier; private volatile DocumentMapper mapper; @@ -186,7 +185,6 @@ public MapperService( indexSettings, idFieldMapper ); - this.documentParsingObserverSupplier = documentParsingObserverSupplier; this.documentParser = new DocumentParser( parserConfiguration, this.mappingParserContextSupplier.get(), diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 4c3e1d38b6d57..dd1adaa3c49e7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -258,11 +258,16 @@ public static class Builder extends FieldMapper.Builder { final Parameter eagerGlobalOrdinals = Parameter.boolParam( "eager_global_ordinals", true, - m -> ((TextFieldMapper) m).eagerGlobalOrdinals, + m -> ((TextFieldMapper) m).fieldType().eagerGlobalOrdinals, false ); - final Parameter indexPhrases = Parameter.boolParam("index_phrases", false, m -> ((TextFieldMapper) m).indexPhrases, false); + final Parameter indexPhrases = Parameter.boolParam( + "index_phrases", + false, + m -> ((TextFieldMapper) m).fieldType().indexPhrases, + false + ); final Parameter indexPrefixes = new Parameter<>( "index_prefixes", false, @@ -366,9 +371,10 @@ private TextFieldType buildFieldType( tsi, context.isSourceSynthetic(), syntheticSourceDelegate(fieldType, multiFields), - meta.getValue() + meta.getValue(), + eagerGlobalOrdinals.getValue(), + indexPhrases.getValue() ); - ft.eagerGlobalOrdinals = eagerGlobalOrdinals.getValue(); if (fieldData.getValue()) { ft.setFielddata(true, freqFilter.getValue()); } @@ -443,7 +449,6 @@ private SubFieldInfo buildPhraseInfo(FieldType fieldType, TextFieldType parent) throw new IllegalArgumentException("Cannot set index_phrases on field [" + name() + "] if positions are not enabled"); } FieldType phraseFieldType = new FieldType(fieldType); - parent.setIndexPhrases(); PhraseWrappedAnalyzer a = new PhraseWrappedAnalyzer( analyzers.getIndexAnalyzer().analyzer(), analyzers.positionIncrementGap.get() @@ -648,8 +653,8 @@ public static class TextFieldType extends StringFieldType { private boolean fielddata; private FielddataFrequencyFilter filter; private PrefixFieldType prefixFieldType; - private boolean indexPhrases = false; - private boolean eagerGlobalOrdinals = false; + private final boolean indexPhrases; + private final boolean eagerGlobalOrdinals; private final boolean isSyntheticSource; /** * In some configurations text fields use a sub-keyword field to provide @@ -665,12 +670,16 @@ public TextFieldType( TextSearchInfo tsi, boolean isSyntheticSource, KeywordFieldMapper.KeywordFieldType syntheticSourceDelegate, - Map meta + Map meta, + boolean eagerGlobalOrdinals, + boolean indexPhrases ) { super(name, indexed, stored, false, tsi, meta); 
            fielddata = false;
             this.isSyntheticSource = isSyntheticSource;
             this.syntheticSourceDelegate = syntheticSourceDelegate;
+            this.eagerGlobalOrdinals = eagerGlobalOrdinals;
+            this.indexPhrases = indexPhrases;
         }

         public TextFieldType(String name, boolean indexed, boolean stored, Map<String, Object> meta) {
@@ -685,6 +694,8 @@ public TextFieldType(String name, boolean indexed, boolean stored, Map<String, Object> meta) {
-            super(name, indexed, stored, tsi, false, null, meta);
+            super(name, indexed, stored, tsi, false, null, meta, false, false);
         }

         public ConstantScoreTextFieldType(String name) {
@@ -1114,11 +1123,9 @@ public Query existsQuery(SearchExecutionContext context) {
     private final NamedAnalyzer indexAnalyzer;
     private final IndexAnalyzers indexAnalyzers;
     private final int positionIncrementGap;
-    private final boolean eagerGlobalOrdinals;
     private final PrefixConfig indexPrefixes;
     private final FielddataFrequencyFilter freqFilter;
     private final boolean fieldData;
-    private final boolean indexPhrases;
     private final FieldType fieldType;
     private final SubFieldInfo prefixFieldInfo;
     private final SubFieldInfo phraseFieldInfo;
@@ -1152,11 +1159,9 @@ protected TextFieldMapper(
         this.indexOptions = builder.indexOptions.getValue();
         this.norms = builder.norms.getValue();
         this.termVectors = builder.termVectors.getValue();
-        this.eagerGlobalOrdinals = builder.eagerGlobalOrdinals.getValue();
         this.indexPrefixes = builder.indexPrefixes.getValue();
         this.freqFilter = builder.freqFilter.getValue();
         this.fieldData = builder.fieldData.get();
-        this.indexPhrases = builder.indexPhrases.getValue();
     }

     @Override

From 9d39e3f0e9e202af394532788355755c81aad98c Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 19 Sep 2023 08:14:42 -0700
Subject: [PATCH 05/27] Fix mock plugin service when loading service providers (#99664)

When tests run with a mock plugin service, they may load service
providers. Normally each plugin would be in its own classloader.
However, in the test case, there is usually a single classloader. This
means any service provider implementations appear in the system
classloader of the test. The mock plugin service attempts to work
around this fact by detecting when a service provider comes from the
system classloader and using a Set to avoid creating duplicates.
However, this does not work since the service providers are already
instantiated and have identity. In this commit we track the created
service providers differently, collecting by the concrete SPI type.
When the concrete type is already loaded, we skip instantiating it
multiple times.
---
 .../plugins/PluginsServiceTests.java          |  4 +-
 .../plugins/MockPluginsService.java           | 42 +++++++++++++------
 2 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
index eca3afa6c5ebb..72aba521f1b79 100644
--- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java
@@ -705,10 +705,10 @@ public void testLoadServiceProvidersInSameClassLoader() {
             .instance();

         // We shouldn't find the FooTestService implementation with PluginOther
-        assertThat(MockPluginsService.createExtensions(TestService.class, othPlugin), empty());
+        assertThat(MockPluginsService.createExtensions(TestService.class, othPlugin, e -> false), empty());

        // We should find the FooTestService implementation when we use FooPlugin, because it matches the constructor arg.
- var providers = MockPluginsService.createExtensions(TestService.class, fooPlugin); + var providers = MockPluginsService.createExtensions(TestService.class, fooPlugin, e -> false); assertThat(providers, allOf(hasSize(1), everyItem(instanceOf(BarTestService.class)))); } diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java index 48c5fa8c84415..686105f9ed74e 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java @@ -23,11 +23,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.function.Predicate; public class MockPluginsService extends PluginsService { @@ -92,9 +92,10 @@ public PluginsAndModules info() { @Override @SuppressWarnings({ "rawtypes", "unchecked" }) public List loadServiceProviders(Class service) { - // We use a set here to avoid duplicates because SPIClassIterator will match + // We use a map here to avoid duplicates because SPIClassIterator will match // all plugins in MockNode, because all plugins are loaded by the same class loader. - Set result = new HashSet<>(); + // Each entry in the map is a unique service provider implementation. + Map, T> result = new HashMap<>(); for (LoadedPlugin pluginTuple : plugins()) { var plugin = pluginTuple.instance(); var classLoader = plugin.getClass().getClassLoader(); @@ -105,32 +106,47 @@ public List loadServiceProviders(Class service) { var res = new ArrayList>(); SPIClassIterator.get(service, classLoader).forEachRemaining(res::add); return List.copyOf(res); - }).iterator()); + }).iterator(), result::containsKey); } else { - extension = createExtensions(service, plugin); + extension = createExtensions(service, plugin, result::containsKey); } - result.addAll(extension); + extension.forEach(e -> result.put(e.getClass(), e)); } - return List.copyOf(result); + return List.copyOf(result.values()); } /** * When we load tests with MockNode, all plugins are loaded with the same class loader, * which breaks loading service providers with our SPIClassIterator. Since all plugins are * loaded in the same class loader, we find all plugins for any class found by the SPIClassIterator - * causing us to pass wrong plugin type to createExtension. This modified createExtensions, checks for - * the type and returns an empty list if the plugin class type is incompatible. + * causing us to pass plugin types to createExtension that aren't actually part of that plugin. + * This modified createExtensions, checks for the type and returns an empty list if the + * plugin class type is incompatible. It also skips loading extension types that have already + * been loaded, so that duplicates are not created. 
     */
-    static <T> List<T> createExtensions(Class<T> extensionPointType, Plugin plugin) {
+    static <T> List<T> createExtensions(
+        Class<T> extensionPointType,
+        Plugin plugin,
+        Predicate<Class<?>> loadedPredicate
+    ) {
         Iterator<Class<? extends T>> classIterator = SPIClassIterator.get(extensionPointType, plugin.getClass().getClassLoader());
-        return createExtensions(extensionPointType, plugin, classIterator);
+        return createExtensions(extensionPointType, plugin, classIterator, loadedPredicate);
     }

-    private static <T> List<T> createExtensions(Class<T> extensionPointType, Plugin plugin, Iterator<Class<? extends T>> classIterator) {
+    private static <T> List<T> createExtensions(
+        Class<T> extensionPointType,
+        Plugin plugin,
+        Iterator<Class<? extends T>> classIterator,
+        Predicate<Class<?>> loadedPredicate
+    ) {
         List<T> extensions = new ArrayList<>();
         while (classIterator.hasNext()) {
             Class<? extends T> extensionClass = classIterator.next();
+            if (loadedPredicate.test(extensionClass)) {
+                // skip extensions that have already been loaded
+                continue;
+            }
             @SuppressWarnings("unchecked")
             Constructor<T>[] constructors = (Constructor<T>[]) extensionClass.getConstructors();

From 33d8604d869ed6649c4af893df99f4e24a8b49ba Mon Sep 17 00:00:00 2001
From: Luigi Dell'Aquila
Date: Tue, 19 Sep 2023 17:20:23 +0200
Subject: [PATCH 06/27] Make ESQL more resilient to non-indexed fields (#99588)

Fixes https://github.com/elastic/elasticsearch/issues/99506

This is an attempt to avoid runtime exceptions when dealing with fields
that have no `doc_values` support (eg. that are not indexed) and so
cannot be extracted. In this scenario, we currently support extraction
of keyword fields and of text fields when source is present (ie. not
with synthetic source), but for all the other types, ESQL fails with an
exception. This PR adds a last resort defense against these errors,
returning a default value when actual values cannot be extracted, and
avoiding runtime failures.

There is a significant change here that impacts unsupported fields:
before this PR, unsupported field values were returned as `""`. We
cannot reuse that value here, because at this stage the data type is
already well defined, with all the constraints in terms of block types,
so we cannot always return a BytesRef (eg. for numerics, that have
different blocks, or IP, that requires a specific string format). To
avoid multiple ways of returning invalid values, this PR standardizes
on returning `null` in all cases AND emitting a warning regarding the
unsupported field. This has a few advantages:
- `null` is valid for all types
- it doesn't overlap with valid values (eg. `""` is a valid value for a
  KEYWORD field)
- it's the only practical alternative for types where defining a value
  for unsupported values is impossible, like for numerics
- it keeps the result clean, moving the report of the problem to the
  right place, that is warnings

There is an alternative to this approach, that is to try to intercept
the problem during the query analysis/resolution phase. It could be
more elegant, but we risk missing some cases and still have to catch
errors during the physical/extraction/execution phase, so we'll
probably need this anyway.
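
Below is a standalone sketch of the behavior this change settles on. The
class and method names here are invented for illustration and are not the
real ESQL internals; only the warning text mirrors the one emitted by
ValueSources in the diff that follows.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.function.IntFunction;

// When a field cannot be extracted, substitute an extractor that yields
// null for every row and record a single warning, instead of throwing or
// returning "" (which is only representable for BytesRef-backed types).
class NullFallbackSketch {
    static final Set<String> warnings = new LinkedHashSet<>();

    static IntFunction<Object> extractorFor(String field, boolean extractable, IntFunction<Object> real) {
        if (extractable) {
            return real;
        }
        warnings.add("Field [" + field + "] cannot be retrieved, it is unsupported or not indexed; returning null");
        return row -> null; // null is valid for every block type, unlike ""
    }

    public static void main(String[] args) {
        IntFunction<Object> ip = extractorFor("ip_noidx", false, row -> "192.168.0.1");
        List<Object> column = new ArrayList<>();
        for (int row = 0; row < 3; row++) {
            column.add(ip.apply(row));
        }
        System.out.println(column);   // prints [null, null, null]
        System.out.println(warnings); // prints the single per-field warning
    }
}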
--- docs/changelog/99588.yaml | 6 + .../lucene/UnsupportedValueSource.java | 22 +-- .../compute/lucene/ValueSources.java | 73 +++++++-- .../resources/rest-api-spec/test/40_tsdb.yml | 8 +- .../test/40_unsupported_types.yml | 52 ++++--- .../rest-api-spec/test/90_non_indexed.yml | 146 ++++++++++++++++++ .../src/main/resources/mapping-basic.json | 5 + .../optimizer/LocalPhysicalPlanOptimizer.java | 16 +- .../xpack/esql/analysis/AnalyzerTests.java | 17 +- .../LocalLogicalPlanOptimizerTests.java | 14 +- .../optimizer/PhysicalPlanOptimizerTests.java | 41 ++++- 11 files changed, 329 insertions(+), 71 deletions(-) create mode 100644 docs/changelog/99588.yaml create mode 100644 x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml new file mode 100644 index 0000000000000..7cbb53376fdf0 --- /dev/null +++ b/docs/changelog/99588.yaml @@ -0,0 +1,6 @@ +pr: 99588 +summary: Make ESQL more resilient to non-indexed fields +area: ES|QL +type: bug +issues: + - 99506 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java index d3ed8da1a17b0..3f2632d9a643f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java @@ -8,9 +8,9 @@ package org.elasticsearch.compute.lucene; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Rounding; import org.elasticsearch.index.fielddata.DocValueBits; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -20,8 +20,7 @@ public class UnsupportedValueSource extends ValuesSource { - public static final String UNSUPPORTED_OUTPUT = ""; - private static final BytesRef result = new BytesRef(UNSUPPORTED_OUTPUT); + public static final String UNSUPPORTED_OUTPUT = null; private final ValuesSource originalSource; public UnsupportedValueSource(ValuesSource originalSource) { @@ -37,22 +36,7 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOExc // ignore and fall back to UNSUPPORTED_OUTPUT } } - return new SortedBinaryDocValues() { - @Override - public boolean advanceExact(int doc) throws IOException { - return true; - } - - @Override - public int docValueCount() { - return 1; - } - - @Override - public BytesRef nextValue() throws IOException { - return result; - } - }; + return FieldData.emptySortedBinary(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java index 7f852b3c6908b..d1d68df52362c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java @@ -7,10 +7,16 @@ package org.elasticsearch.compute.lucene; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import 
org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.SourceValueFetcherSortedBinaryIndexFieldData; import org.elasticsearch.index.fielddata.StoredFieldSortedBinaryIndexFieldData; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -24,6 +30,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -71,19 +78,9 @@ public static List sources( try { fieldData = ctx.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); } catch (IllegalArgumentException e) { - if (asUnsupportedSource) { - sources.add( - new ValueSourceInfo( - new UnsupportedValueSourceType(fieldType.typeName()), - new UnsupportedValueSource(null), - elementType, - ctx.getIndexReader() - ) - ); - continue; - } else { - throw e; - } + sources.add(unsupportedValueSource(elementType, ctx, fieldType, e)); + HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); + continue; } var fieldContext = new FieldContext(fieldName, fieldData, fieldType); var vsType = fieldData.getValuesSourceType(); @@ -106,6 +103,56 @@ public static List sources( return sources; } + private static ValueSourceInfo unsupportedValueSource( + ElementType elementType, + SearchExecutionContext ctx, + MappedFieldType fieldType, + IllegalArgumentException e + ) { + return switch (elementType) { + case BYTES_REF -> new ValueSourceInfo( + new UnsupportedValueSourceType(fieldType.typeName()), + new UnsupportedValueSource(null), + elementType, + ctx.getIndexReader() + ); + case LONG, INT -> new ValueSourceInfo( + CoreValuesSourceType.NUMERIC, + ValuesSource.Numeric.EMPTY, + elementType, + ctx.getIndexReader() + ); + case BOOLEAN -> new ValueSourceInfo( + CoreValuesSourceType.BOOLEAN, + ValuesSource.Numeric.EMPTY, + elementType, + ctx.getIndexReader() + ); + case DOUBLE -> new ValueSourceInfo(CoreValuesSourceType.NUMERIC, new ValuesSource.Numeric() { + @Override + public boolean isFloatingPoint() { + return true; + } + + @Override + public SortedNumericDocValues longValues(LeafReaderContext context) { + return DocValues.emptySortedNumeric(); + } + + @Override + public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws IOException { + return org.elasticsearch.index.fielddata.FieldData.emptySortedNumericDoubles(); + } + + @Override + public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException { + return org.elasticsearch.index.fielddata.FieldData.emptySortedBinary(); + } + }, elementType, ctx.getIndexReader()); + default -> throw e; + }; + } + private static TextValueSource textValueSource(SearchExecutionContext ctx, MappedFieldType fieldType) { if (fieldType.isStored()) { IndexFieldData fieldData = new StoredFieldSortedBinaryIndexFieldData( diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml index 
a72205b3af064..d72d09644a128 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: allowed_warnings_regex - do: indices.create: index: test @@ -84,6 +86,8 @@ load everything: --- load a document: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test | where @timestamp == "2021-04-28T18:50:23.142Z"' @@ -93,8 +97,8 @@ load a document: - match: {values.0.0: "2021-04-28T18:50:23.142Z"} - match: {values.0.1: "10.10.55.3"} - match: {values.0.2: "dog"} - - match: {values.0.3: ""} - - match: {values.0.4: ""} + - match: {values.0.3: null } + - match: {values.0.4: null } - match: {values.0.5: "df3145b3-0563-4d3b-a0f7-897eb2876ea9"} - match: {values.0.6: "pod"} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml index 52f7460ea727e..44af9559598ab 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml @@ -1,5 +1,6 @@ ---- -unsupported: +setup: + - skip: + features: allowed_warnings_regex - do: indices.create: index: test @@ -98,10 +99,15 @@ unsupported: "some_doc": { "foo": "xy", "bar": 12 } } +--- +unsupported: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test' + - match: { columns.0.name: aggregate_metric_double } - match: { columns.0.type: unsupported } - match: { columns.1.name: binary } @@ -158,29 +164,29 @@ unsupported: - match: { columns.26.type: integer } - length: { values: 1 } - - match: { values.0.0: "" } - - match: { values.0.1: "" } - - match: { values.0.2: "" } - - match: { values.0.3: "" } - - match: { values.0.4: "" } - - match: { values.0.5: "" } - - match: { values.0.6: "" } - - match: { values.0.7: "" } - - match: { values.0.8: "" } - - match: { values.0.9: "" } - - match: { values.0.10: "" } - - match: { values.0.11: "" } - - match: { values.0.12: "" } - - match: { values.0.13: "" } + - match: { values.0.0: null } + - match: { values.0.1: null } + - match: { values.0.2: null } + - match: { values.0.3: null } + - match: { values.0.4: null } + - match: { values.0.5: null } + - match: { values.0.6: null } + - match: { values.0.7: null } + - match: { values.0.8: null } + - match: { values.0.9: null } + - match: { values.0.10: null } + - match: { values.0.11: null } + - match: { values.0.12: null } + - match: { values.0.13: null } - match: { values.0.14: "foo bar baz" } - match: { values.0.15: Alice } - - match: { values.0.16: "" } - - match: { values.0.17: "" } - - match: { values.0.18: "" } - - match: { values.0.19: "" } - - match: { values.0.20: "" } - - match: { values.0.21: "" } - - match: { values.0.22: "" } + - match: { values.0.16: null } + - match: { values.0.17: null } + - match: { values.0.18: null } + - match: { values.0.19: null } + - match: { values.0.20: null } + - match: { values.0.21: null } + - match: { values.0.22: null } - match: { values.0.23: 12 } - match: { values.0.24: xy } - 
match: { values.0.25: "foo bar" } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml new file mode 100644 index 0000000000000..53dc5bab6df46 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml @@ -0,0 +1,146 @@ +setup: + - skip: + features: allowed_warnings_regex + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 5 + mappings: + properties: + boolean: + type: boolean + boolean_noidx: + type: boolean + index: false + doc_values: false + date: + type: date + date_noidx: + type: date + index: false + doc_values: false + double: + type: double + double_noidx: + type: double + index: false + doc_values: false + float: + type: float + float_noidx: + type: float + index: false + doc_values: false + integer: + type: integer + integer_noidx: + type: integer + index: false + doc_values: false + ip: + type: ip + ip_noidx: + type: ip + index: false + doc_values: false + keyword: + type: keyword + keyword_noidx: + type: keyword + index: false + doc_values: false + long: + type: long + long_noidx: + type: long + index: false + doc_values: false + + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { + "keyword": "foo", + "keyword_noidx": "foo", + "boolean": true, + "boolean_noidx": true, + "integer": 10, + "integer_noidx": 10, + "long": 20, + "long_noidx": 20, + "float": 30, + "float_noidx": 30, + "double": 40, + "double_noidx": 40, + "date": "2021-04-28T18:50:04.467Z", + "date_noidx": "2021-04-28T18:50:04.467Z", + "ip": "192.168.0.1", + "ip_noidx": "192.168.0.1" + } + +--- +unsupported: + - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" + esql.query: + body: + query: 'from test' + + - match: { columns.0.name: boolean } + - match: { columns.0.type: boolean } + - match: { columns.1.name: boolean_noidx } + - match: { columns.1.type: boolean } + - match: { columns.2.name: date } + - match: { columns.2.type: date } + - match: { columns.3.name: date_noidx } + - match: { columns.3.type: date } + - match: { columns.4.name: double } + - match: { columns.4.type: double } + - match: { columns.5.name: double_noidx } + - match: { columns.5.type: double } + - match: { columns.6.name: float } + - match: { columns.6.type: double } + - match: { columns.7.name: float_noidx } + - match: { columns.7.type: double } + - match: { columns.8.name: integer } + - match: { columns.8.type: integer } + - match: { columns.9.name: integer_noidx } + - match: { columns.9.type: integer } + - match: { columns.10.name: ip } + - match: { columns.10.type: ip } + - match: { columns.11.name: ip_noidx } + - match: { columns.11.type: ip } + - match: { columns.12.name: keyword } + - match: { columns.12.type: keyword } + - match: { columns.13.name: keyword_noidx } + - match: { columns.13.type: keyword } + - match: { columns.14.name: long } + - match: { columns.14.type: long } + - match: { columns.15.name: long_noidx } + - match: { columns.15.type: long } + + - length: { values: 1 } + + - match: { values.0.0: true } + - match: { values.0.1: null } + - match: { values.0.2: "2021-04-28T18:50:04.467Z" } + - match: { values.0.3: null } + - match: { values.0.4: 40 } + - match: { values.0.5: null } + - match: { values.0.6: 30 } + - match: { values.0.7: null } + - match: { 
values.0.8: 10 } + - match: { values.0.9: null } + - match: { values.0.10: "192.168.0.1" } + - match: { values.0.11: null } + - match: { values.0.12: "foo" } + - match: { values.0.13: "foo" } # this is a special case, ESQL can retrieve keywords from source + - match: { values.0.14: 20 } + - match: { values.0.15: null } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json index b650cb7e64564..9ce87d01bfbb9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json @@ -28,6 +28,11 @@ "type": "keyword" } } + }, + "long_noidx": { + "type": "long", + "index": false, + "doc_values": false } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index e837858153cc1..0f4d194d8016c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -230,7 +230,7 @@ public static boolean canPushToSource(Expression exp) { private static boolean isAttributePushable(Expression expression, ScalarFunction operation) { if (expression instanceof FieldAttribute f && f.getExactInfo().hasExact()) { - return true; + return isAggregatable(f); } if (expression instanceof MetadataAttribute ma && ma.searchable()) { return operation == null @@ -243,6 +243,17 @@ private static boolean isAttributePushable(Expression expression, ScalarFunction } } + /** + * Determines whether a field can be used for exact push down (e.g. sort or filter). + * "aggregatable" is the most accurate information we can have from field_caps as of now. + * Pushing down operations on fields that are not aggregatable would result in an error.
+ * @param f the field attribute to check + * @return true if the field is aggregatable, and hence safe for exact push down + */ + private static boolean isAggregatable(FieldAttribute f) { + return f.exactAttribute().field().isAggregatable(); + } + private static class PushLimitToSource extends OptimizerRule { @Override protected PhysicalPlan rule(LimitExec limitExec) { @@ -280,7 +291,8 @@ protected PhysicalPlan rule(TopNExec topNExec) { private boolean canPushDownOrders(List orders) { // allow only exact FieldAttributes (no expressions) for sorting - return orders.stream().allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact()); + return orders.stream() + .allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)); } private List buildFieldSorts(List orders) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 6e480749efb21..777c5c7cbccb3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -213,13 +213,13 @@ public void testProjectStar() { assertProjection(""" from test | keep * - """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); } public void testNoProjection() { assertProjection(""" from test - """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); assertProjectionTypes( """ from test @@ -232,6 +232,7 @@ public void testNoProjection() { DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.KEYWORD, + DataTypes.LONG, DataTypes.INTEGER ); } @@ -240,7 +241,7 @@ public void testProjectOrder() { assertProjection(""" from test | keep first_name, *, last_name - """, "first_name", "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary", "last_name"); + """, "first_name", "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary", "last_name"); } public void testProjectThenDropName() { @@ -272,21 +273,21 @@ public void testProjectDropPattern() { from test | keep * | drop *_name - """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary"); } public void testProjectDropNoStarPattern() { assertProjection(""" from test | drop *_name - """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary"); } public void testProjectOrderPatternWithRest() { assertProjection(""" from test | keep *name, *, emp_no - """, "first_name", "last_name", "_meta_field", "gender", "job", "job.raw", "languages", "salary", "emp_no"); + """, "first_name", "last_name", "_meta_field", "gender", "job", "job.raw", "languages", "long_noidx", "salary", "emp_no"); } public void testProjectDropPatternAndKeepOthers() { @@ -423,7 +424,7 @@ public void testDropPatternUnsupportedFields() { assertProjection(""" from test | drop *ala* - """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name");
+ """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx"); } public void testDropUnsupportedPattern() { @@ -491,7 +492,7 @@ public void testRenameReuseAlias() { assertProjection(""" from test | rename emp_no as e, first_name as e - """, "_meta_field", "e", "gender", "job", "job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "e", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); } public void testRenameUnsupportedField() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index f242c1e082829..7a82ab32b7ef3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -246,7 +246,19 @@ public void testMissingFieldInFilterNoProjection() { var local = as(localPlan, LocalRelation.class); assertThat( Expressions.names(local.output()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary", "x") + contains( + "_meta_field", + "emp_no", + "first_name", + "gender", + "job", + "job.raw", + "languages", + "last_name", + "long_noidx", + "salary", + "x" + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 746a34eaedce4..79add5bc08e6b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -390,7 +390,7 @@ public void testExtractorMultiEvalWithDifferentNames() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") ); } @@ -420,7 +420,7 @@ public void testExtractorMultiEvalWithSameName() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") ); } @@ -877,7 +877,7 @@ public void testPushLimitAndFilterToSource() { assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") ); var source = source(extract.child()); @@ -1683,6 +1683,24 @@ public void testNoTextFilterPushDown() { assertNull(source.query()); } + public void testNoNonIndexedFilterPushDown() { + var plan = physicalPlan(""" + from test + | where long_noidx == 1 + """); + + var optimized = optimizedPlan(plan); + var limit = as(optimized, LimitExec.class); + var exchange = 
asRemoteExchange(limit.child()); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var limit2 = as(extract.child(), LimitExec.class); + var filter = as(limit2.child(), FilterExec.class); + var extract2 = as(filter.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.query()); + } + public void testTextWithRawFilterPushDown() { var plan = physicalPlan(""" from test @@ -1716,6 +1734,23 @@ public void testNoTextSortPushDown() { assertNull(source.sorts()); } + public void testNoNonIndexedSortPushDown() { + var plan = physicalPlan(""" + from test + | sort long_noidx + """); + + var optimized = optimizedPlan(plan); + var topN = as(optimized, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var topN2 = as(extract.child(), TopNExec.class); + var extract2 = as(topN2.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.sorts()); + } + public void testTextWithRawSortPushDown() { var plan = physicalPlan(""" from test From b99702237f8dd7b8319161eab6a62371fe5151bb Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Tue, 19 Sep 2023 11:32:27 -0400 Subject: [PATCH 07/27] [buildkite] Add elastic-agent for monitoring buildkite agents (#99637) --- .buildkite/hooks/pre-command | 5 +++++ .buildkite/scripts/setup-monitoring.sh | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100755 .buildkite/scripts/setup-monitoring.sh diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 42af0d79b34da..3d20e3fb73b8e 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -74,3 +74,8 @@ if [[ "${USE_SNYK_CREDENTIALS:-}" == "true" ]]; then SNYK_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/migrated/snyk) export SNYK_TOKEN fi + +if [[ "$BUILDKITE_AGENT_META_DATA_PROVIDER" != *"k8s"* ]]; then + # Run in the background, while the job continues + nohup .buildkite/scripts/setup-monitoring.sh > /dev/null 2>&1 & +fi diff --git a/.buildkite/scripts/setup-monitoring.sh b/.buildkite/scripts/setup-monitoring.sh new file mode 100755 index 0000000000000..95a5b90effea2 --- /dev/null +++ b/.buildkite/scripts/setup-monitoring.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -euo pipefail + +ELASTIC_AGENT_URL=$(vault read -field=url secret/ci/elastic-elasticsearch/elastic-agent-token) +ELASTIC_AGENT_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/elastic-agent-token) + +if [[ ! -d /opt/elastic-agent ]]; then + sudo mkdir /opt/elastic-agent + sudo chown -R buildkite-agent:buildkite-agent /opt/elastic-agent + cd /opt/elastic-agent + + archive=elastic-agent-8.10.1-linux-x86_64.tar.gz + if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then + archive=elastic-agent-8.10.1-linux-arm64.tar.gz + fi + + curl -L -O "https://artifacts.elastic.co/downloads/beats/elastic-agent/$archive" + + tar xzf "$archive" --directory=. 
--strip-components=1 +fi + +cd /opt/elastic-agent +sudo ./elastic-agent install -f --url="$ELASTIC_AGENT_URL" --enrollment-token="$ELASTIC_AGENT_TOKEN" From 21978866663db06327885554c12f669ed66a4409 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Tue, 19 Sep 2023 11:42:03 -0400 Subject: [PATCH 08/27] Remove unused PersistedClusterStateService#newPersistedClusterStateService (#99671) --- .../plugins/ClusterCoordinationPlugin.java | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java index 28f3a778c01a0..a911cec220f60 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java @@ -77,17 +77,6 @@ CoordinationState.PersistedState createPersistedState( } interface PersistedClusterStateServiceFactory { - - @Deprecated(forRemoval = true) - default PersistedClusterStateService newPersistedClusterStateService( - NodeEnvironment nodeEnvironment, - NamedXContentRegistry xContentRegistry, - ClusterSettings clusterSettings, - ThreadPool threadPool - ) { - throw new AssertionError("Should not be called!"); - } - PersistedClusterStateService newPersistedClusterStateService( NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, From 369c5f4926e6b2a41067a4a23e76c21b9f8a76dc Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Tue, 19 Sep 2023 11:11:54 -0500 Subject: [PATCH 09/27] Fix doc for deprecated TLS settings (#98513) --- docs/reference/docs/reindex.asciidoc | 12 +++-- docs/reference/settings/common-defs.asciidoc | 11 ++-- .../settings/monitoring-settings.asciidoc | 54 +++++++++---------- .../settings/security-settings.asciidoc | 6 +++ 4 files changed, 47 insertions(+), 36 deletions(-) diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 48b055f4e0fa2..7c3cd8716dfe3 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1142,6 +1142,7 @@ You cannot specify both `reindex.ssl.certificate_authorities` and `reindex.ssl.truststore.password`:: The password to the truststore (`reindex.ssl.truststore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead. This setting cannot be used with `reindex.ssl.truststore.secure_password`. `reindex.ssl.truststore.secure_password` (<>):: @@ -1175,6 +1176,7 @@ You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. `reindex.ssl.key_passphrase`:: Specifies the passphrase to decrypt the PEM encoded private key (`reindex.ssl.key`) if it is encrypted. +deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead. Cannot be used with `reindex.ssl.secure_key_passphrase`. `reindex.ssl.secure_key_passphrase` (<>):: @@ -1194,8 +1196,9 @@ If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. `reindex.ssl.keystore.password`:: -The password to the keystore (`reindex.ssl.keystore.path`). This setting cannot be used -with `reindex.ssl.keystore.secure_password`. +The password to the keystore (`reindex.ssl.keystore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead. +This setting cannot be used with `reindex.ssl.keystore.secure_password`. `reindex.ssl.keystore.secure_password` (<>):: The password to the keystore (`reindex.ssl.keystore.path`). 
@@ -1203,8 +1206,9 @@ This setting cannot be used with `reindex.ssl.keystore.password`. `reindex.ssl.keystore.key_password`:: The password for the key in the keystore (`reindex.ssl.keystore.path`). -Defaults to the keystore password. This setting cannot be used with -`reindex.ssl.keystore.secure_key_password`. +Defaults to the keystore password. +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead. +This setting cannot be used with `reindex.ssl.keystore.secure_key_password`. `reindex.ssl.keystore.secure_key_password` (<>):: The password for the key in the keystore (`reindex.ssl.keystore.path`). diff --git a/docs/reference/settings/common-defs.asciidoc b/docs/reference/settings/common-defs.asciidoc index 33e736c70046e..a369b3870d9c4 100644 --- a/docs/reference/settings/common-defs.asciidoc +++ b/docs/reference/settings/common-defs.asciidoc @@ -57,20 +57,21 @@ end::ssl-key-pem[] tag::ssl-key-passphrase[] The passphrase that is used to decrypt the private key. Since the key might not -be encrypted, this value is optional. +be encrypted, this value is optional. deprecated:[7.17.0] Prefer `ssl.secure_key_passphrase` instead. + You cannot use this setting and `ssl.secure_key_passphrase` at the same time. end::ssl-key-passphrase[] tag::ssl-keystore-key-password[] The password for the key in the keystore. The default is the keystore password. +deprecated:[7.17.0] Prefer `ssl.keystore.secure_key_password` instead. + You cannot use this setting and `ssl.keystore.secure_password` at the same time. //TBD: You cannot use this setting and `ssl.keystore.secure_key_password` at the same time. end::ssl-keystore-key-password[] tag::ssl-keystore-password[] -The password for the keystore. +The password for the keystore. deprecated:[7.17.0] Prefer `ssl.keystore.secure_password` instead. //TBD: You cannot use this setting and `ssl.keystore.secure_password` at the same time. end::ssl-keystore-password[] @@ -122,7 +123,7 @@ or `SSLv3`. See <>. end::ssl-supported-protocols[] tag::ssl-truststore-password[] -The password for the truststore. +The password for the truststore. deprecated:[7.17.0] Prefer `ssl.truststore.secure_password` instead. + You cannot use this setting and `ssl.truststore.secure_password` at the same time. @@ -160,7 +161,7 @@ Authority (CA); has a `hostname` or IP address that matches the names within the certificate. `certificate`:: -Validates the provided certificate and verifies that it's signed by a +Validates the provided certificate and verifies that it's signed by a trusted authority (CA), but doesn't check the certificate `hostname`. `none`:: @@ -173,4 +174,4 @@ resolve TLS errors. ===== + Defaults to `full`. -end::ssl-verification-mode-values[] \ No newline at end of file +end::ssl-verification-mode-values[] diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 2ff94e6885226..a10116005a6b5 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -49,7 +49,7 @@ and {ls} is ignored. [[xpack-monitoring-collection-interval]] // tag::monitoring-collection-interval-tag[] `xpack.monitoring.collection.interval` {ess-icon}:: -deprecated:[6.3.0,"Use `xpack.monitoring.collection.enabled` set to `false` instead."] +deprecated:[6.3.0,"Use `xpack.monitoring.collection.enabled` set to `false` instead."] (<>) Setting to `-1` to disable data collection is no longer supported beginning with 7.0.0. 
+ @@ -60,7 +60,7 @@ option in `kibana.yml` to the same value. `xpack.monitoring.elasticsearch.collection.enabled`:: (<>) deprecated:[7.16.0] Controls whether statistics about your -{es} cluster should be collected. Defaults to `true`. This is different from +{es} cluster should be collected. Defaults to `true`. This is different from `xpack.monitoring.collection.enabled`, which allows you to enable or disable all monitoring collection. However, this setting simply disables the collection of {es} data while still allowing other data (e.g., {kib}, {ls}, Beats, or APM @@ -285,18 +285,18 @@ For example: `["elasticsearch_version_mismatch","xpack_license_expiration"]`. You can configure the following TLS/SSL settings. +{ssl-prefix}.ssl.supported_protocols+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-supported-protocols] ifdef::verifies[] +{ssl-prefix}.ssl.verification_mode+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] Controls the verification of certificates. include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-verification-mode-values] endif::verifies[] +{ssl-prefix}.ssl.cipher_suites+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-cipher-suites-values] [#{ssl-context}-tls-ssl-key-trusted-certificate-settings] @@ -318,19 +318,19 @@ When using PEM encoded files, use the following settings: include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-key-pem] +{ssl-prefix}.ssl.key_passphrase+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-key-passphrase] +{ssl-prefix}.ssl.secure_key_passphrase+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-secure-key-passphrase] +{ssl-prefix}.ssl.certificate+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-certificate] +{ssl-prefix}.ssl.certificate_authorities+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-certificate-authorities] ===== Java keystore files @@ -339,35 +339,35 @@ When using Java keystore files (JKS), which contain the private key, certificate and certificates that should be trusted, use the following settings: +{ssl-prefix}.ssl.keystore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] +{ssl-prefix}.ssl.keystore.password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +{ssl-prefix}.ssl.keystore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] +{ssl-prefix}.ssl.keystore.key_password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-key-password] +{ssl-prefix}.ssl.keystore.secure_key_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-key-password] +{ssl-prefix}.ssl.truststore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-path] +{ssl-prefix}.ssl.truststore.password+:: -(<>) deprecated:[7.16.0] +(<>) 
include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-password] +{ssl-prefix}.ssl.truststore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-password] [#{ssl-context}-pkcs12-files] @@ -379,43 +379,43 @@ that contain the private key, certificate and certificates that should be truste PKCS#12 files are configured in the same way as Java keystore files: +{ssl-prefix}.ssl.keystore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] +{ssl-prefix}.ssl.keystore.type+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-type-pkcs12] +{ssl-prefix}.ssl.keystore.password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +{ssl-prefix}.ssl.keystore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] +{ssl-prefix}.ssl.keystore.key_password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-key-password] +{ssl-prefix}.ssl.keystore.secure_key_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-key-password] +{ssl-prefix}.ssl.truststore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-path] +{ssl-prefix}.ssl.truststore.type+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] Set this to `PKCS12` to indicate that the truststore is a PKCS#12 file. //TBD:Should this use the ssl-truststore-type definition and default values? 
+{ssl-prefix}.ssl.truststore.password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-password] +{ssl-prefix}.ssl.truststore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-password] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index bffd99a3ab5d8..12999c1b35c51 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -2414,6 +2414,12 @@ include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-type-pkcs12] // end::jwt-ssl-keystore-type-tag[] +// tag::jwt-ssl-keystore-password-tag[] +`ssl.keystore.password` {ess-icon}:: +(<>) +include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +// end::jwt-ssl-keystore-password-tag[] + `ssl.keystore.secure_password`:: (<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] From 0a6493064b8dfab5f7151e859578349b4aaa11ec Mon Sep 17 00:00:00 2001 From: Jonathan Buttner <56361221+jonathan-buttner@users.noreply.github.com> Date: Tue, 19 Sep 2023 13:05:58 -0400 Subject: [PATCH 10/27] [ML] Switching to 1 mb chunks for ELSER model import (#99677) * Switching to 1 mb chunks * Update docs/changelog/99677.yaml * Adjusting the description --- docs/changelog/99677.yaml | 5 +++++ .../xpack/ml/packageloader/action/ModelImporter.java | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/99677.yaml diff --git a/docs/changelog/99677.yaml b/docs/changelog/99677.yaml new file mode 100644 index 0000000000000..04c1c28cf2e12 --- /dev/null +++ b/docs/changelog/99677.yaml @@ -0,0 +1,5 @@ +pr: 99677 +summary: Using 1 MB chunks for elser model storage +area: Machine Learning +type: bug +issues: [ ] diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java index 113cb2c092376..5a6eac0cc3b76 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java @@ -40,7 +40,7 @@ * A helper class for abstracting out the use of the ModelLoaderUtils to make dependency injection testing easier. */ class ModelImporter { - private static final int DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024; // 4MB + private static final int DEFAULT_CHUNK_SIZE = 1024 * 1024; // 1MB private static final Logger logger = LogManager.getLogger(ModelImporter.class); private final Client client; private final String modelId; From 34eea49ef56493b3395bfa869587687e67dee80c Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Tue, 19 Sep 2023 20:22:34 +0200 Subject: [PATCH 11/27] ESQL: Swap arguments of remaining date_xxx() functions (#99561) This swaps the arguments of the `date_extract()`, `date_format()` and `date_parse()` functions, to align with `date_trunc()`. The field argument is now always last, even for _format() and _parse(), whose optional argument will now be provided as the first one.
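For example, after this change the pattern argument comes first and the field comes last (queries as they appear in the docs and csv-spec tests updated by this patch):

FROM employees
| KEEP first_name, last_name, hire_date
| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date)

ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06")
| EVAL year = DATE_EXTRACT("year", date)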
--- .../esql/functions/date_format.asciidoc | 2 +- .../functions/types/date_extract.asciidoc | 2 +- .../rest-api-spec/test/70_locale.yml | 4 +- .../resources/blog-ignoreCsvTests.csv-spec | 2 +- .../src/main/resources/date.csv-spec | 38 +++++++++---------- .../src/main/resources/docs.csv-spec | 8 ++-- .../src/main/resources/stats.csv-spec | 2 +- .../resources/stats_count_distinct.csv-spec | 2 +- .../esql/action/EsqlActionRuntimeFieldIT.java | 2 +- .../scalar/date/BinaryDateTimeFunction.java | 11 ++++++ .../function/scalar/date/DateExtract.java | 28 +++++++++----- .../function/scalar/date/DateFormat.java | 26 +++++++++---- .../function/scalar/date/DateParse.java | 17 +++++---- .../function/scalar/date/DateTrunc.java | 18 +++------ .../xpack/esql/analysis/AnalyzerTests.java | 32 +++++++++++----- .../scalar/date/DateExtractTests.java | 10 ++--- .../function/scalar/date/DateParseTests.java | 8 ++-- .../xpack/esql/planner/EvalMapperTests.java | 4 +- .../session/IndexResolverFieldNamesTests.java | 10 ++--- 19 files changed, 133 insertions(+), 93 deletions(-) diff --git a/docs/reference/esql/functions/date_format.asciidoc b/docs/reference/esql/functions/date_format.asciidoc index 3f61e07221111..40bf024a3469d 100644 --- a/docs/reference/esql/functions/date_format.asciidoc +++ b/docs/reference/esql/functions/date_format.asciidoc @@ -7,5 +7,5 @@ is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. ---- FROM employees | KEEP first_name, last_name, hire_date -| EVAL hired = DATE_FORMAT(hire_date, "YYYY-MM-dd") +| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date) ---- diff --git a/docs/reference/esql/functions/types/date_extract.asciidoc b/docs/reference/esql/functions/types/date_extract.asciidoc index 57a83810d9b7c..9963c85b2af85 100644 --- a/docs/reference/esql/functions/types/date_extract.asciidoc +++ b/docs/reference/esql/functions/types/date_extract.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== arg1 | arg2 | result -datetime | keyword | long +keyword | datetime | long |=== diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml index 06d2b5e461822..91ff5ddc7cbe9 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml @@ -26,7 +26,7 @@ setup: - do: esql.query: body: - query: 'FROM events | eval fixed_format = date_format(@timestamp, "MMMM"), variable_format = date_format(@timestamp, format) | sort @timestamp | keep @timestamp, fixed_format, variable_format' + query: 'FROM events | eval fixed_format = date_format("MMMM", @timestamp), variable_format = date_format(format, @timestamp) | sort @timestamp | keep @timestamp, fixed_format, variable_format' - match: { columns.0.name: "@timestamp" } - match: { columns.0.type: "date" } @@ -45,7 +45,7 @@ setup: - do: esql.query: body: - query: 'FROM events | eval fixed_format = date_format(@timestamp, "MMMM"), variable_format = date_format(@timestamp, format) | sort @timestamp | keep @timestamp, fixed_format, variable_format' + query: 'FROM events | eval fixed_format = date_format("MMMM", @timestamp), variable_format = date_format(format, @timestamp) | sort @timestamp | keep @timestamp, fixed_format, variable_format' locale: "it-IT" - match: { columns.0.name: "@timestamp" } diff --git 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec index f670738bd3c49..6ddc9601db4ac 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec @@ -2,7 +2,7 @@ FROM employees | WHERE still_hired == true -| EVAL hired = DATE_FORMAT(hire_date, "YYYY") +| EVAL hired = DATE_FORMAT("YYYY", hire_date) | STATS avg_salary = AVG(salary) BY languages | EVAL avg_salary = ROUND(avg_salary) | EVAL lang_code = TO_STRING(languages) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index e04e870da7713..41b068595b4cd 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -45,7 +45,7 @@ emp_no:integer | x:date evalDateFormat -from employees | sort hire_date | eval x = date_format(hire_date), y = date_format(hire_date, "YYYY-MM-dd") | keep emp_no, x, y | limit 5; +from employees | sort hire_date | eval x = date_format(hire_date), y = date_format("YYYY-MM-dd", hire_date) | keep emp_no, x, y | limit 5; emp_no:integer | x:keyword | y:keyword 10009 | 1985-02-18T00:00:00.000Z | 1985-02-18 @@ -295,7 +295,7 @@ hire_date:date | hd:date ; now -row a = now() | eval x = a == now(), y = substring(date_format(a, "yyyy"), 0, 2) | keep x, y; +row a = now() | eval x = a == now(), y = substring(date_format("yyyy", a), 0, 2) | keep x, y; x:boolean | y:keyword true | 20 @@ -338,14 +338,14 @@ AVG(salary):double | bucket:date ; evalDateParseWithSimpleDate -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM-dd") | keep b; +row a = "2023-02-01" | eval b = date_parse("yyyy-MM-dd", a) | keep b; b:datetime 2023-02-01T00:00:00.000Z ; evalDateParseWithDateTime -row a = "2023-02-01 12:15:55" | eval b = date_parse(a, "yyyy-MM-dd HH:mm:ss") | keep b; +row a = "2023-02-01 12:15:55" | eval b = date_parse("yyyy-MM-dd HH:mm:ss", a) | keep b; b:datetime 2023-02-01T12:15:55.000Z @@ -359,8 +359,8 @@ b:datetime ; evalDateParseWrongDate -row a = "2023-02-01 foo" | eval b = date_parse(a, "yyyy-MM-dd") | keep b; -warning:Line 1:37: evaluation of [date_parse(a, \"yyyy-MM-dd\")] failed, treating result as null. Only first 20 failures recorded. +row a = "2023-02-01 foo" | eval b = date_parse("yyyy-MM-dd", a) | keep b; +warning:Line 1:37: evaluation of [date_parse(\"yyyy-MM-dd\", a)] failed, treating result as null. Only first 20 failures recorded. warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01 foo] with format [yyyy-MM-dd] b:datetime @@ -368,16 +368,16 @@ null ; evalDateParseNotMatching -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM") | keep b; -warning:Line 1:33: evaluation of [date_parse(a, \"yyyy-MM\")] failed, treating result as null. Only first 20 failures recorded. +row a = "2023-02-01" | eval b = date_parse("yyyy-MM", a) | keep b; +warning:Line 1:33: evaluation of [date_parse(\"yyyy-MM\", a)] failed, treating result as null. Only first 20 failures recorded. 
warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01] with format [yyyy-MM] b:datetime null ; evalDateParseNotMatching2 -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM-dd HH:mm:ss") | keep b; -warning:Line 1:33: evaluation of [date_parse(a, \"yyyy-MM-dd HH:mm:ss\")] failed, treating result as null. Only first 20 failures recorded. +row a = "2023-02-01" | eval b = date_parse("yyyy-MM-dd HH:mm:ss", a) | keep b; +warning:Line 1:33: evaluation of [date_parse(\"yyyy-MM-dd HH:mm:ss\", a)] failed, treating result as null. Only first 20 failures recorded. warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01] with format [yyyy-MM-dd HH:mm:ss] b:datetime @@ -385,7 +385,7 @@ null ; evalDateParseNullPattern -row a = "2023-02-01" | eval b = date_parse(a, null) | keep b; +row a = "2023-02-01" | eval b = date_parse(null, a) | keep b; b:datetime null @@ -393,8 +393,8 @@ null evalDateParseDynamic from employees | where emp_no == 10039 or emp_no == 10040 | sort emp_no -| eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; +| eval birth_date_string = date_format("yyyy-MM-dd", birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean 10039 | 1959-10-01 | 1959-10-01 | true @@ -403,8 +403,8 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean evalDateParseDynamic2 from employees | where emp_no >= 10047 | sort emp_no | where emp_no <= 10051 -| eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") +| eval birth_date_string = date_format("yyyy-MM-dd", birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | keep emp_no, new_date, birth_date | eval bool = new_date == birth_date; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean @@ -418,8 +418,8 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool:boo evalDateParseDynamicDateAndPattern from employees | where emp_no == 10049 or emp_no == 10050 | sort emp_no -| eval pattern = "yyyy-MM-dd", birth_date_string = date_format(birth_date, pattern) -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; +| eval pattern = "yyyy-MM-dd", birth_date_string = date_format(pattern, birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean 10049 | null | null | null @@ -437,7 +437,7 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool: dateFields from employees | where emp_no == 10049 or emp_no == 10050 -| eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year"), day = date_extract(birth_date, "day_of_month") +| eval year = date_extract("year", birth_date), month = date_extract("month_of_year", birth_date), day = date_extract("day_of_month", birth_date) | keep emp_no, year, month, day; ignoreOrder:true @@ -449,7 +449,7 @@ emp_no:integer | year:long | month:long | day:long dateFormatLocale from employees | where emp_no == 10049 or emp_no == 10050 | 
sort emp_no -| eval birth_month = date_format(birth_date, "MMMM") | keep emp_no, birth_date, birth_month; +| eval birth_month = date_format("MMMM", birth_date) | keep emp_no, birth_date, birth_month; ignoreOrder:true emp_no:integer | birth_date:datetime | birth_month:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 137820e695892..d1aa4dd811df3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -233,7 +233,7 @@ avg_lang:double | max_lang:integer docsStatsGroupByMultipleValues // tag::statsGroupByMultipleValues[] FROM employees -| EVAL hired = DATE_FORMAT(hire_date, "YYYY") +| EVAL hired = DATE_FORMAT("YYYY", hire_date) | STATS avg_salary = AVG(salary) BY hired, languages.long | EVAL avg_salary = ROUND(avg_salary) | SORT hired, languages.long @@ -293,8 +293,8 @@ Uri |Lenart |1.75 dateExtract // tag::dateExtract[] -ROW date = DATE_PARSE("2022-05-06", "yyyy-MM-dd") -| EVAL year = DATE_EXTRACT(date, "year") +ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06") +| EVAL year = DATE_EXTRACT("year", date) // end::dateExtract[] ; @@ -404,7 +404,7 @@ Saniya |Kalloufi |2.1 |6.9 dateParse // tag::dateParse[] ROW date_string = "2022-05-06" -| EVAL date = DATE_PARSE(date_string, "yyyy-MM-dd") +| EVAL date = DATE_PARSE("yyyy-MM-dd", date_string) // end::dateParse[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index da559485d17ff..89e4c3b2b8174 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -322,7 +322,7 @@ c:long | g:keyword | tws:long ; byStringAndString -from employees | eval hire_year_str = date_format(hire_date, "yyyy") | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5; +from employees | eval hire_year_str = date_format("yyyy", hire_date) | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5; c:long | gender:keyword | hire_year_str:keyword 8 | F | 1989 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec index 2af60793f3416..462045d9968ee 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec @@ -68,7 +68,7 @@ c:long ; countDistinctOfKeywords -from employees | eval hire_year_str = date_format(hire_date, "yyyy") | stats g = count_distinct(gender), h = count_distinct(hire_year_str); +from employees | eval hire_year_str = date_format("yyyy", hire_date) | stats g = count_distinct(gender), h = count_distinct(hire_year_str); g:long | h:long 2 | 14 diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java index bdbcd9a548f58..76c874b0fe63d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java @@ -88,7 +88,7 @@ public void testBoolean() throws InterruptedException, IOException { public void testDate() throws InterruptedException, IOException { createIndexWithConstRuntimeField("date"); EsqlQueryResponse response = run(""" - from test | eval d=date_format(const, "yyyy") | stats min (foo) by d"""); + from test | eval d=date_format("yyyy", const) | stats min (foo) by d"""); assertThat(getValuesList(response), equalTo(List.of(List.of(0L, "2023")))); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java index c7c923e8e912a..455c9d162dc8a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java @@ -17,6 +17,9 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.util.Objects; +import java.util.function.Predicate; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; public abstract class BinaryDateTimeFunction extends BinaryScalarFunction { @@ -66,4 +69,12 @@ public boolean equals(Object o) { BinaryDateTimeFunction that = (BinaryDateTimeFunction) o; return zoneId().equals(that.zoneId()); } + + // TODO: drop check once 8.11 is released + static TypeResolution argumentTypesAreSwapped(DataType left, DataType right, Predicate rightTest, String source) { + if (DataTypes.isDateTime(left) && rightTest.test(right)) { + return new TypeResolution(format(null, "function definition has been updated, please swap arguments in [{}]", source)); + } + return TypeResolution.TYPE_RESOLVED; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java index 66dbb1dd33901..1b33d5829e472 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java @@ -31,6 +31,7 @@ import java.util.Locale; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.expression.function.scalar.date.BinaryDateTimeFunction.argumentTypesAreSwapped; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isStringAndExact; @@ -38,22 +39,22 @@ public class DateExtract extends ConfigurationFunction implements EvaluatorMappe private ChronoField chronoField; - public DateExtract(Source source, Expression field, Expression chronoFieldExp, Configuration configuration) { - super(source, List.of(field, chronoFieldExp), configuration); + public DateExtract(Source source, Expression chronoFieldExp, Expression field, Configuration configuration) { + super(source, List.of(chronoFieldExp, field), configuration); } @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - var fieldEvaluator = toEvaluator.apply(children().get(0)); - if (children().get(1).foldable()) { + var fieldEvaluator = 
toEvaluator.apply(children().get(1)); + if (children().get(0).foldable()) { ChronoField chrono = chronoField(); if (chrono == null) { - BytesRef field = (BytesRef) children().get(1).fold(); + BytesRef field = (BytesRef) children().get(0).fold(); throw new EsqlIllegalArgumentException("invalid date field for [{}]: {}", sourceText(), field.utf8ToString()); } return dvrCtx -> new DateExtractConstantEvaluator(fieldEvaluator.get(dvrCtx), chrono, configuration().zoneId(), dvrCtx); } - var chronoEvaluator = toEvaluator.apply(children().get(1)); + var chronoEvaluator = toEvaluator.apply(children().get(0)); return dvrCtx -> new DateExtractEvaluator( source(), fieldEvaluator.get(dvrCtx), @@ -65,7 +66,7 @@ public ExpressionEvaluator.Factory toEvaluator(Function newChildren) { @Override protected NodeInfo info() { - return NodeInfo.create(this, DateFormat::new, field, format, configuration()); + Expression first = format != null ? format : field; + Expression second = format != null ? field : null; + return NodeInfo.create(this, DateFormat::new, first, second, configuration()); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java index 98d75cbf672df..d1565091d320c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.ZoneId; -import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -41,10 +40,10 @@ public class DateParse extends ScalarFunction implements OptionalArgument, Evalu private final Expression field; private final Expression format; - public DateParse(Source source, Expression field, Expression format) { - super(source, format != null ? Arrays.asList(field, format) : Arrays.asList(field)); - this.field = field; - this.format = format; + public DateParse(Source source, Expression first, Expression second) { + super(source, second != null ? List.of(first, second) : List.of(first)); + this.field = second != null ? second : first; + this.format = second != null ? first : null; } @Override @@ -58,12 +57,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = isString(field, sourceText(), FIRST); + TypeResolution resolution = isString(field, sourceText(), format != null ? SECOND : FIRST); if (resolution.unresolved()) { return resolution; } if (format != null) { - resolution = isStringAndExact(format, sourceText(), SECOND); + resolution = isStringAndExact(format, sourceText(), FIRST); if (resolution.unresolved()) { return resolution; } @@ -126,7 +125,9 @@ public Expression replaceChildren(List newChildren) { @Override protected NodeInfo info() { - return NodeInfo.create(this, DateParse::new, field, format); + Expression first = format != null ? format : field; + Expression second = format != null ? 
field : null; + return NodeInfo.create(this, DateParse::new, first, second); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java index d7964e6c011fd..55885bf514fe2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.Duration; import java.time.Period; @@ -26,8 +25,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate; @@ -45,7 +42,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = argumentTypesAreSwapped(); + TypeResolution resolution = argumentTypesAreSwapped( + left().dataType(), + right().dataType(), + EsqlDataTypes::isTemporalAmount, + sourceText() + ); if (resolution.unresolved()) { return resolution; } @@ -58,14 +60,6 @@ protected TypeResolution resolveType() { return isType(interval(), EsqlDataTypes::isTemporalAmount, sourceText(), SECOND, "dateperiod", "timeduration"); } - // TODO: drop check once 8.11 is released - private TypeResolution argumentTypesAreSwapped() { - if (DataTypes.isDateTime(left().dataType()) && isTemporalAmount(right().dataType())) { - return new TypeResolution(format(null, "function definition has been updated, please swap arguments in [{}]", sourceText())); - } - return TypeResolution.TYPE_RESOLVED; - } - @Override public Object fold() { return EvaluatorMapper.super.fold(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 777c5c7cbccb3..06050e41f73de 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -917,36 +917,36 @@ public void testDateFormatOnText() { public void testDateFormatWithNumericFormat() { verifyUnsupported(""" from test - | eval date_format(date, 1) - """, "second argument of [date_format(date, 1)] must be [string], found value [1] type [integer]"); + | eval date_format(1, date) + """, "first argument of [date_format(1, date)] must be [string], found value [1] type [integer]"); } public void testDateFormatWithDateFormat() { verifyUnsupported(""" from test | eval date_format(date, date) - """, "second argument of [date_format(date, date)] must be [string], found value [date] type [datetime]"); + """, "first argument of [date_format(date, date)] must be [string], found value [date] type [datetime]"); } public void 
testDateParseOnInt() { verifyUnsupported(""" from test - | eval date_parse(int, keyword) - """, "first argument of [date_parse(int, keyword)] must be [string], found value [int] type [integer]"); + | eval date_parse(keyword, int) + """, "second argument of [date_parse(keyword, int)] must be [string], found value [int] type [integer]"); } public void testDateParseOnDate() { verifyUnsupported(""" from test - | eval date_parse(date, keyword) - """, "first argument of [date_parse(date, keyword)] must be [string], found value [date] type [datetime]"); + | eval date_parse(keyword, date) + """, "second argument of [date_parse(keyword, date)] must be [string], found value [date] type [datetime]"); } public void testDateParseOnIntPattern() { verifyUnsupported(""" from test - | eval date_parse(keyword, int) - """, "second argument of [date_parse(keyword, int)] must be [string], found value [int] type [integer]"); + | eval date_parse(int, keyword) + """, "first argument of [date_parse(int, keyword)] must be [string], found value [int] type [integer]"); } public void testDateTruncOnInt() { @@ -977,6 +977,20 @@ public void testDateTruncWithNumericInterval() { """, "second argument of [date_trunc(1, date)] must be [dateperiod or timeduration], found value [1] type [integer]"); } + public void testDateExtractWithSwappedArguments() { + verifyUnsupported(""" + from test + | eval date_extract(date, "year") + """, "function definition has been updated, please swap arguments in [date_extract(date, \"year\")]"); + } + + public void testDateFormatWithSwappedArguments() { + verifyUnsupported(""" + from test + | eval date_format(date, "yyyy-MM-dd") + """, "function definition has been updated, please swap arguments in [date_format(date, \"yyyy-MM-dd\")]"); + } + public void testDateTruncWithSwappedArguments() { verifyUnsupported(""" from test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index a87e7c5eb5bb1..96c35905e3dc0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -39,10 +39,10 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Date Extract Year", () -> { return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(1687944333000L, DataTypes.DATETIME, "date"), - new TestCaseSupplier.TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field") + new TestCaseSupplier.TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(1687944333000L, DataTypes.DATETIME, "date") ), - "DateExtractEvaluator[value=Attribute[channel=0], chronoField=Attribute[channel=1], zone=Z]", + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", DataTypes.LONG, equalTo(2023L) ); @@ -55,8 +55,8 @@ public void testAllChronoFields() { for (ChronoField value : ChronoField.values()) { DateExtract instance = new DateExtract( Source.EMPTY, - new Literal(Source.EMPTY, epochMilli, DataTypes.DATETIME), new Literal(Source.EMPTY, new BytesRef(value.name()), DataTypes.KEYWORD), + new Literal(Source.EMPTY, epochMilli, DataTypes.DATETIME), EsqlTestUtils.TEST_CFG ); @@ -75,7 +75,7 @@ protected Expression 
build(Source source, List args) { @Override protected List argSpec() { - return List.of(required(DataTypes.DATETIME), required(strings())); + return List.of(required(strings()), required(DataTypes.DATETIME)); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 48e70e929f8e1..115892640f2b1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -33,10 +33,10 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Basic Case", () -> { return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second") + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first") ), - "DateParseEvaluator[val=Attribute[channel=0], formatter=Attribute[channel=1], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", DataTypes.DATETIME, equalTo(1683244800000L) ); @@ -50,7 +50,7 @@ protected Expression build(Source source, List args) { @Override protected List argSpec() { - return List.of(required(strings()), optional(strings())); + return List.of(optional(strings()), required(strings())); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 7956892c34645..efe8e773bfdaa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -103,8 +103,8 @@ public static List params() { DOUBLE1, literal, new Length(Source.EMPTY, literal), - new DateFormat(Source.EMPTY, DATE, datePattern, TEST_CONFIG), - new DateFormat(Source.EMPTY, literal, datePattern, TEST_CONFIG), + new DateFormat(Source.EMPTY, datePattern, DATE, TEST_CONFIG), + new DateFormat(Source.EMPTY, datePattern, literal, TEST_CONFIG), new StartsWith(Source.EMPTY, literal, literal), new Substring(Source.EMPTY, literal, LONG, LONG), new DateTrunc(Source.EMPTY, dateInterval, DATE) }) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index bbd6906221aa5..7e3b9117b6410 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -236,8 +236,8 @@ public void testEvalDateParseDynamic() { from employees | where emp_no == 10039 or emp_no == 10040 | sort emp_no - | eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") - | eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") + | eval birth_date_string = date_format("yyyy-MM-dd", birth_date) + | eval 
new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool""", Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); } @@ -246,7 +246,7 @@ public void testDateFields() { assertFieldNames(""" from employees | where emp_no == 10049 or emp_no == 10050 - | eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year") + | eval year = date_extract("year", birth_date), month = date_extract("month_of_year", birth_date) | keep emp_no, year, month""", Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); } @@ -793,7 +793,7 @@ public void testByStringAndLongWithAlias() { public void testByStringAndString() { assertFieldNames(""" from employees - | eval hire_year_str = date_format(hire_date, "yyyy") + | eval hire_year_str = date_format("yyyy", hire_date) | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5""", Set.of("hire_date", "hire_date.*", "gender", "gender.*")); @@ -822,7 +822,7 @@ public void testCountDistinctOfKeywords() { assertFieldNames( """ from employees - | eval hire_year_str = date_format(hire_date, "yyyy") + | eval hire_year_str = date_format("yyyy", hire_date) | stats g = count_distinct(gender), h = count_distinct(hire_year_str)""", Set.of("hire_date", "hire_date.*", "gender", "gender.*") ); From c7c3f877e8c77b2f98f9c884892f14f3aaef23e9 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 19 Sep 2023 20:45:37 +0200 Subject: [PATCH 12/27] Add java.net.NetPermission for apm's plugin security (#99474) when apm is enabled it throws a security manager exception: java.security.AccessControlException: access denied ("java.net.NetPermission" "getProxySelector") This commit adds a permission so that apm can be enabled --- docs/changelog/99474.yaml | 5 +++++ modules/apm/src/main/plugin-metadata/plugin-security.policy | 1 + 2 files changed, 6 insertions(+) create mode 100644 docs/changelog/99474.yaml diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml new file mode 100644 index 0000000000000..ea23481069833 --- /dev/null +++ b/docs/changelog/99474.yaml @@ -0,0 +1,5 @@ +pr: 99474 +summary: Add `java.net.NetPermission` to APM module's permissions +area: Infra/Core +type: bug +issues: [] diff --git a/modules/apm/src/main/plugin-metadata/plugin-security.policy b/modules/apm/src/main/plugin-metadata/plugin-security.policy index f0032bb291789..b85d3ec05c277 100644 --- a/modules/apm/src/main/plugin-metadata/plugin-security.policy +++ b/modules/apm/src/main/plugin-metadata/plugin-security.policy @@ -26,4 +26,5 @@ grant codeBase "${codebase.elastic-apm-agent}" { permission java.lang.RuntimePermission "getClassLoader"; permission java.io.FilePermission "<>", "read,write"; permission org.elasticsearch.secure_sm.ThreadPermission "modifyArbitraryThreadGroup"; + permission java.net.NetPermission "getProxySelector"; }; From 1f3126b47b50f93a5b6c595cc34c1bd7a94168b3 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Tue, 19 Sep 2023 15:36:21 -0400 Subject: [PATCH 13/27] Add mappings versions to ClusterState.Builder convenience methods (#99551) This is a follow-up to [#99307](https://github.com/elastic/elasticsearch/pull/99307), adjusting convenience methods that used to take `TransportVersion` arguments to account for `MappingsVersion` maps. 
--- .../allocation/AllocationBenchmark.java | 2 +- .../cluster/ClusterStateDiffIT.java | 7 ++-- .../state/TransportClusterStateAction.java | 2 +- .../elasticsearch/cluster/ClusterState.java | 42 ++++++++++++++++--- .../cluster/coordination/Coordinator.java | 5 ++- .../coordination/NodeJoinExecutor.java | 4 +- .../coordination/NodeLeftExecutor.java | 5 ++- .../TransportVersionsFixupListener.java | 2 +- .../gateway/ClusterStateUpdaters.java | 2 +- .../elasticsearch/indices/SystemIndices.java | 7 ++++ .../reroute/ClusterRerouteResponseTests.java | 14 ++++++- .../cluster/ClusterStateTests.java | 5 ++- .../TransportVersionsFixupListenerTests.java | 14 +++---- .../ClusterStateCreationUtils.java | 3 +- .../test/ClusterServiceUtils.java | 4 +- .../ml/integration/MlAutoUpdateServiceIT.java | 4 +- ...nedModelAssignmentClusterServiceTests.java | 14 +++---- ...rainedModelAssignmentNodeServiceTests.java | 10 ++--- .../security/authc/TokenServiceTests.java | 3 +- .../TransformGetCheckpointTests.java | 2 +- 20 files changed, 102 insertions(+), 49 deletions(-) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 9daa5c24f3bd4..a9338d5660b28 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -152,7 +152,7 @@ public void setUp() throws Exception { .metadata(metadata) .routingTable(routingTable) .nodes(nb) - .compatibilityVersions(compatibilityVersions) + .nodeIdsToCompatibilityVersions(compatibilityVersions) .build(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 373213be479a7..b869b3a90fbce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -51,7 +51,6 @@ import org.elasticsearch.snapshots.SnapshotInfoTestUtils; import org.elasticsearch.snapshots.SnapshotsInProgressSerializationTests; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.VersionUtils; import java.util.Collections; @@ -80,8 +79,8 @@ public void testClusterStateDiffSerialization() throws Exception { DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")) .nodes(discoveryNodes) - .putTransportVersion("master", TransportVersionUtils.randomVersion(random())) - .putTransportVersion("other", TransportVersionUtils.randomVersion(random())) + .putCompatibilityVersions("master", CompatibilityVersionsUtils.fakeSystemIndicesRandom()) + .putCompatibilityVersions("other", CompatibilityVersionsUtils.fakeSystemIndicesRandom()) .build(); ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes( ClusterState.Builder.toBytes(clusterState), @@ -250,7 +249,7 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) { versions.put(id, CompatibilityVersionsUtils.fakeSystemIndicesRandom()); } - return ClusterState.builder(clusterState).nodes(nodes).compatibilityVersions(versions); + return 
ClusterState.builder(clusterState).nodes(nodes).nodeIdsToCompatibilityVersions(versions); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index c33bc841190a0..c2684c4becf3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -151,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - builder.compatibilityVersions(getCompatibilityVersions(currentState)); + builder.nodeIdsToCompatibilityVersions(getCompatibilityVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 603a93ab11f79..95409b5bbf357 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -777,13 +777,35 @@ public DiscoveryNodes nodes() { return nodes; } + // Deprecate to keep downstream projects compiling + @Deprecated(forRemoval = true) public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { - // TODO[wrb]: system index mappings versions will be added in a followup - compatibilityVersions.put(nodeId, new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId), Map.of())); + return putCompatibilityVersions(nodeId, transportVersion, Map.of()); + } + + public Builder putCompatibilityVersions( + String nodeId, + TransportVersion transportVersion, + Map systemIndexMappingsVersions + ) { + return putCompatibilityVersions( + nodeId, + new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId), systemIndexMappingsVersions) + ); + } + + public Builder putCompatibilityVersions(String nodeId, CompatibilityVersions versions) { + compatibilityVersions.put(nodeId, versions); return this; } + // Deprecate to keep downstream projects compiling + @Deprecated(forRemoval = true) public Builder compatibilityVersions(Map versions) { + return nodeIdsToCompatibilityVersions(versions); + } + + public Builder nodeIdsToCompatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map this.compatibilityVersions.keySet().retainAll(versions.keySet()); @@ -923,11 +945,15 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr builder.routingTable = RoutingTable.readFrom(in); builder.nodes = DiscoveryNodes.readFrom(in, localNode); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - builder.compatibilityVersions(in.readMap(CompatibilityVersions::readVersion)); + builder.nodeIdsToCompatibilityVersions(in.readMap(CompatibilityVersions::readVersion)); } else { // this clusterstate is from a pre-8.8.0 node // infer the versions from discoverynodes for now - builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n))); + // leave mappings versions empty + builder.nodes() + .getNodes() + .values() + .forEach(n -> builder.putCompatibilityVersions(n.getId(), inferTransportVersion(n), Map.of())); } 
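        // A pre-8.8.0 node cannot report per-node compatibility versions over the
        // wire, so the transport version is inferred from each DiscoveryNode and the
        // system index mappings versions are left empty; TransportVersionsFixupListener
        // later replaces the inferred transport versions only.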
builder.blocks = ClusterBlocks.readFrom(in); int customSize = in.readVInt(); @@ -1076,10 +1102,14 @@ public ClusterState apply(ClusterState state) { builder.routingTable(routingTable.apply(state.routingTable)); builder.nodes(nodes.apply(state.nodes)); if (versions != null) { - builder.compatibilityVersions(this.versions.apply(state.compatibilityVersions)); + builder.nodeIdsToCompatibilityVersions(this.versions.apply(state.compatibilityVersions)); } else { // infer the versions from discoverynodes for now - builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n))); + // leave mappings versions empty + builder.nodes() + .getNodes() + .values() + .forEach(n -> builder.putCompatibilityVersions(n.getId(), inferTransportVersion(n), Map.of())); } builder.metadata(metadata.apply(state.metadata)); builder.blocks(blocks.apply(state.blocks)); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 7ccea8e99918b..619a7e09ee651 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ListenableActionFuture; @@ -175,6 +174,7 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt private final LagDetector lagDetector; private final ClusterFormationFailureHelper clusterFormationFailureHelper; private final JoinReasonService joinReasonService; + private final CompatibilityVersions compatibilityVersions; private Mode mode; private Optional lastKnownLeader; @@ -317,6 +317,7 @@ public Coordinator( this.peerFinderListeners = new CopyOnWriteArrayList<>(); this.peerFinderListeners.add(clusterBootstrapService); this.leaderHeartbeatService = leaderHeartbeatService; + this.compatibilityVersions = compatibilityVersions; } /** @@ -1064,7 +1065,7 @@ protected void doStart() { .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) - .putTransportVersion(getLocalNode().getId(), TransportVersion.current()) + .putCompatibilityVersions(getLocalNode().getId(), compatibilityVersions) .metadata(metadata) .build(); applierState = initialState; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index dd52f20c7355a..170648452d141 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -222,7 +222,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( - newState.nodes(nodesBuilder).compatibilityVersions(compatibilityVersionsMap).build() + newState.nodes(nodesBuilder).nodeIdsToCompatibilityVersions(compatibilityVersionsMap).build() ); final ClusterState updatedState = 
allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); assert enforceVersionBarrier == false @@ -295,7 +295,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) - .compatibilityVersions(compatibilityVersions) + .nodeIdsToCompatibilityVersions(compatibilityVersions) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .metadata( Metadata.builder(currentState.metadata()) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 68c611aeef9a6..39230d0255ae7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -107,7 +107,10 @@ protected ClusterState remainingNodesClusterState( DiscoveryNodes.Builder remainingNodesBuilder, Map compatibilityVersions ) { - return ClusterState.builder(currentState).nodes(remainingNodesBuilder).compatibilityVersions(compatibilityVersions).build(); + return ClusterState.builder(currentState) + .nodes(remainingNodesBuilder) + .nodeIdsToCompatibilityVersions(compatibilityVersions) + .build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index 711f0c84136e7..a54130aec95b6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -117,7 +117,7 @@ public ClusterState execute(BatchExecutionContext cont assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) : "Node " + e.getKey() + " is in the cluster but does not have an associated transport version recorded"; if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { - builder.putTransportVersion(e.getKey(), e.getValue()); + builder.putCompatibilityVersions(e.getKey(), e.getValue(), Map.of()); // unknown mappings versions modified = true; } } diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 2ffadfb26a985..b7826ad17add2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -36,7 +36,7 @@ public static ClusterState setLocalNode( ) { return ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()) - .compatibilityVersions(Map.of(localNode.getId(), compatibilityVersions)) + .putCompatibilityVersions(localNode.getId(), compatibilityVersions) .build(); } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 0e8346f57ea1e..1ae53125ea938 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -123,6 +123,13 @@ public class SystemIndices { new Feature(SYNONYMS_FEATURE_NAME, "Manages synonyms", List.of(SYNONYMS_DESCRIPTOR)) 
).collect(Collectors.toUnmodifiableMap(Feature::getName, Function.identity())); + public static final Map SERVER_SYSTEM_MAPPINGS_VERSIONS = + SERVER_SYSTEM_FEATURE_DESCRIPTORS.values() + .stream() + .flatMap(feature -> feature.getIndexDescriptors().stream()) + .filter(SystemIndexDescriptor::isAutomaticallyManaged) + .collect(Collectors.toMap(SystemIndexDescriptor::getIndexPattern, SystemIndexDescriptor::getMappingsVersion)); + /** * The node's full list of system features is stored here. The map is keyed * on the value of {@link Feature#getName()}, and is used for fast lookup of diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index b5ab63140e433..9c61c5d5eeedd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -130,7 +131,12 @@ public void testToXContentWithDeprecatedClusterState() { { "node_id": "node0", "transport_version": "8000099", - "mappings_versions": {} + "mappings_versions": { + ".system-index": { + "version": 1, + "hash": 0 + } + } } ], "metadata": { @@ -323,7 +329,11 @@ private static ClusterState createClusterState() { var node0 = DiscoveryNodeUtils.create("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000)); return ClusterState.builder(new ClusterName("test")) .nodes(new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build()) - .putTransportVersion(node0.getId(), TransportVersions.V_8_0_0) + .putCompatibilityVersions( + node0.getId(), + TransportVersions.V_8_0_0, + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ) .metadata( Metadata.builder() .put( diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 89aacb6f03932..21f8091f65dd4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -1049,7 +1050,7 @@ private ClusterState buildClusterState() throws IOException { .add(DiscoveryNodeUtils.create("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111))) .build() ) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Map.of( "nodeId1", new CompatibilityVersions(TransportVersion.current(), Map.of(".tasks", new SystemIndexDescriptor.MappingsVersion(1, 1))) @@ -1159,7 +1160,7 @@ public void testGetMinTransportVersion() throws IOException { for (int i = 0; i < numNodes; i++) { TransportVersion tv = TransportVersionUtils.randomVersion(); - 
builder.putTransportVersion("nodeTv" + i, tv); + builder.putCompatibilityVersions("nodeTv" + i, tv, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); minVersion = Collections.min(List.of(minVersion, tv)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index 3c8540c7771c6..323c50bf23c3b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -113,7 +113,7 @@ public void testNothingFixedWhenNothingToInfer() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_8_0)) - .compatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) + .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); @@ -128,7 +128,7 @@ public void testNothingFixedWhenOnNextVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION)) - .compatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) + .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); @@ -143,7 +143,7 @@ public void testNothingFixedWhenOnPreviousVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_7_0, Version.V_8_8_0)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(TransportVersions.V_8_7_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -164,7 +164,7 @@ public void testVersionsAreFixed() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -193,7 +193,7 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -208,7 +208,7 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -229,7 +229,7 @@ public void testFailedRequestsAreRetried() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, 
NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 853fec20e6aef..c40df091a4521 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import java.util.ArrayList; @@ -535,7 +536,7 @@ public static ClusterState state( ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); for (DiscoveryNode node : allNodes) { - state.putTransportVersion(node.getId(), transportVersion); + state.putCompatibilityVersions(node.getId(), transportVersion, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); } Metadata.Builder metadataBuilder = Metadata.builder().generateClusterUuidIfNeeded(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index 80c1f0e700e62..20625cce6d24c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; @@ -28,6 +27,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -126,7 +126,7 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService()); ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) - .putTransportVersion(localNode.getId(), TransportVersion.current()) + .putCompatibilityVersions(localNode.getId(), CompatibilityVersionsUtils.staticCurrent()) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); clusterService.getClusterApplierService().setInitialState(initialClusterState); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java 
b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java index 5059ec50708e2..2e1a1829c0244 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.ml.integration; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.xcontent.XContentType; @@ -111,7 +111,7 @@ public void testAutomaticModelUpdate() throws Exception { .masterNodeId("node_id") .build() ) - .putTransportVersion("node_id", TransportVersion.current()) + .putCompatibilityVersions("node_id", CompatibilityVersionsUtils.staticCurrent()) .build(), ClusterState.builder(new ClusterName("test")).build() ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 9b79754a5afe9..d37edcd85946a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -27,6 +26,7 @@ import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -124,7 +124,7 @@ public void testUpdateModelRoutingTable() { .add(buildNode(startedNode, true, ByteSizeValue.ofGb(4).getBytes(), 8)) .build() ) - .putTransportVersion(nodeId, TransportVersion.current()) + .putCompatibilityVersions(nodeId, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -238,7 +238,7 @@ public void testRemoveAssignment() { ClusterState clusterStateWithAssignment = ClusterState.builder(new ClusterName("testRemoveAssignment")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( 
Metadata.builder() .putCustom( @@ -270,7 +270,7 @@ public void testRemoveAllAssignments() { ClusterState clusterStateWithAssignments = ClusterState.builder(new ClusterName("testRemoveAllAssignments")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) @@ -367,7 +367,7 @@ public void testCreateAssignmentWhileResetModeIsTrue() throws InterruptedExcepti ClusterState currentState = ClusterState.builder(new ClusterName("testCreateAssignment")) .nodes(discoveryNodes) - .putTransportVersion("ml-node-with-room", TransportVersion.current()) + .putCompatibilityVersions("ml-node-with-room", CompatibilityVersionsUtils.staticCurrent()) .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build())) .build(); when(clusterService.state()).thenReturn(currentState); @@ -1768,7 +1768,7 @@ private static ClusterState createClusterState(List nodeIds, Metadata me .toArray(DiscoveryNode[]::new); ClusterState.Builder csBuilder = csBuilderWithNodes("test", nodes); - nodeIds.forEach(id -> csBuilder.putTransportVersion(id, TransportVersion.current())); + nodeIds.forEach(id -> csBuilder.putCompatibilityVersions(id, CompatibilityVersionsUtils.staticCurrent())); return csBuilder.metadata(metadata).build(); } @@ -1810,7 +1810,7 @@ public void testSetAllocationToStopping() { ClusterState clusterStateWithAllocation = ClusterState.builder(new ClusterName("testSetAllocationToStopping")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index b0903cf47dc88..0bd2e716758e4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.inference.assignment; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -21,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; @@ -569,7 +569,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")) .nodes(nodes) - 
.putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -627,7 +627,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -680,7 +680,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -721,7 +721,7 @@ public void testClusterChanged_GivenAllStartedAssignments_AndNonMatchingTargetAl "shouldUpdateAllocations", ClusterState.builder(new ClusterName("shouldUpdateAllocations")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 5faeb02f7029f..4c276993381b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -60,6 +60,7 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; @@ -1271,7 +1272,7 @@ private static DiscoveryNode addAnotherDataNodeWithVersion( discoBuilder.add(anotherDataNode); final ClusterState.Builder newStateBuilder = ClusterState.builder(currentState); newStateBuilder.nodes(discoBuilder); - newStateBuilder.putTransportVersion(anotherDataNode.getId(), transportVersion); + newStateBuilder.putCompatibilityVersions(anotherDataNode.getId(), transportVersion, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); setState(clusterService, newStateBuilder.build()); return anotherDataNode; } diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java index 169ae7b04787f..521a1deafe797 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java @@ -125,7 +125,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req testIndices = testIndicesList.toArray(new String[0]); clusterStateWithIndex = ClusterState.builder(ClusterStateCreationUtils.state(numberOfNodes, testIndices, numberOfShards)) - .putTransportVersion("node01", TransportVersions.V_8_5_0) + 
.putCompatibilityVersions("node01", TransportVersions.V_8_5_0, Map.of()) .build(); transformTask = new Task( From a6cb15c0bc2d869b2bc91a259c61e735fccfc206 Mon Sep 17 00:00:00 2001 From: Dianna Hohensee Date: Tue, 19 Sep 2023 16:46:16 -0400 Subject: [PATCH 14/27] Chunk the cluster allocation explain response (#99641) Chunking the response can save potentially MBs of heap space. Leverages ClusterInfo already being chunked (as of #97800). Chunks the remainder of ClusterAllocationExplanation, and updates the RestClusterAllocationExplainAction to return a chunked response. Resolves #97803 --- docs/changelog/99641.yaml | 5 + .../ClusterAllocationExplainIT.java | 8 +- .../ClusterAllocationExplainResponse.java | 10 +- .../ClusterAllocationExplanation.java | 92 ++++++++++--------- .../ShutdownShardMigrationStatus.java | 4 +- .../AbstractAllocationDecision.java | 32 ++++--- .../AllocateUnassignedDecision.java | 51 +++++----- .../routing/allocation/MoveDecision.java | 61 ++++++------ .../allocation/ShardAllocationDecision.java | 22 ++--- .../xcontent/ChunkedToXContentHelper.java | 6 ++ .../RestClusterAllocationExplainAction.java | 17 +--- .../ClusterAllocationExplainActionTests.java | 3 +- .../ClusterAllocationExplanationTests.java | 9 +- 13 files changed, 177 insertions(+), 143 deletions(-) create mode 100644 docs/changelog/99641.yaml diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml new file mode 100644 index 0000000000000..c74f7380bd93a --- /dev/null +++ b/docs/changelog/99641.yaml @@ -0,0 +1,5 @@ +pr: 99641 +summary: Chunk the cluster allocation explain response +area: Network +type: enhancement +issues: [97803] diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index f201685294058..ba1a8b7919963 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; @@ -1236,10 +1237,7 @@ private ClusterAllocationExplanation runExplain(boolean primary, String nodeId, .get() .getExplanation(); if (logger.isDebugEnabled()) { - XContentBuilder builder = JsonXContent.contentBuilder(); - builder.prettyPrint(); - builder.humanReadable(true); - logger.debug("--> explain json output: \n{}", Strings.toString(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS))); + logger.debug("--> explain json output: \n{}", Strings.toString(explanation, true, true)); } return explanation; } @@ -1304,7 +1302,7 @@ private DiscoveryNode replicaNode() { private XContentParser getParser(ClusterAllocationExplanation explanation) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); - return createParser(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS)); + return createParser(ChunkedToXContent.wrapAsToXContent(explanation).toXContent(builder, ToXContent.EMPTY_PARAMS)); } private void verifyShardInfo(XContentParser parser, boolean primary, boolean 
includeDiskInfo, ShardRoutingState state) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 89f40d407bb49..39baf25f5dada 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -11,13 +11,16 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; /** * Explanation response for a shard in the cluster */ -public class ClusterAllocationExplainResponse extends ActionResponse { +public class ClusterAllocationExplainResponse extends ActionResponse implements ChunkedToXContentObject { private ClusterAllocationExplanation cae; @@ -41,4 +44,9 @@ public ClusterAllocationExplanation getExplanation() { public void writeTo(StreamOutput out) throws IOException { cae.writeTo(out); } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + return cae.toXContentChunked(params); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 34c1bb4a0c85f..d22bae9c5a4b1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -16,27 +16,32 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationDecision; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.time.Instant; +import java.util.Collections; +import java.util.Iterator; import java.util.Locale; import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent; +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk; /** * A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned, * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. 
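 * The explanation is serialized as a stream of {@code ToXContent} chunks via
 * {@link ChunkedToXContentObject} rather than as one monolithic object, so the
 * potentially large cluster-info section need not be materialized all at once.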
*/ -public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { +public final class ClusterAllocationExplanation implements ChunkedToXContentObject, Writeable { static final String NO_SHARD_SPECIFIED_MESSAGE = "No shard was specified in the explain API request, so this response " + "explains a randomly chosen unassigned shard. There may be other unassigned shards in this cluster which cannot be assigned for " @@ -156,9 +161,10 @@ public ShardAllocationDecision getShardAllocationDecision() { return shardAllocationDecision; } - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat(singleChunk((builder, p) -> { + builder.startObject(); + if (isSpecificShard() == false) { builder.field("note", NO_SHARD_SPECIFIED_MESSAGE); } @@ -169,48 +175,52 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (shardRouting.unassignedInfo() != null) { unassignedInfoToXContent(shardRouting.unassignedInfo(), builder); } - if (currentNode != null) { + + if (this.currentNode != null) { builder.startObject("current_node"); - { - discoveryNodeToXContent(currentNode, true, builder); - if (shardAllocationDecision.getMoveDecision().isDecisionTaken() - && shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) { - builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking()); - } + discoveryNodeToXContent(this.currentNode, true, builder); + if (shardAllocationDecision.getMoveDecision().isDecisionTaken() + && shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) { + builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking()); } builder.endObject(); } - if (this.clusterInfo != null) { - builder.startObject("cluster_info"); - { - // This field might be huge, TODO add chunking support here - ChunkedToXContent.wrapAsToXContent(clusterInfo).toXContent(builder, params); - } - builder.endObject(); // end "cluster_info" - } - if (shardAllocationDecision.isDecisionTaken()) { - shardAllocationDecision.toXContent(builder, params); + + return builder; + }), + this.clusterInfo != null + ? 
Iterators.concat( + ChunkedToXContentHelper.startObject("cluster_info"), + this.clusterInfo.toXContentChunked(params), + ChunkedToXContentHelper.endObject() + ) + : Collections.emptyIterator(), + getShardAllocationDecisionChunked(params), + Iterators.single((builder, p) -> builder.endObject()) + ); + } + + private Iterator getShardAllocationDecisionChunked(ToXContent.Params params) { + if (shardAllocationDecision.isDecisionTaken()) { + return shardAllocationDecision.toXContentChunked(params); + } else { + String explanation; + if (shardRouting.state() == ShardRoutingState.RELOCATING) { + explanation = "the shard is in the process of relocating from node [" + + currentNode.getName() + + "] " + + "to node [" + + relocationTargetNode.getName() + + "], wait until relocation has completed"; } else { - String explanation; - if (shardRouting.state() == ShardRoutingState.RELOCATING) { - explanation = "the shard is in the process of relocating from node [" - + currentNode.getName() - + "] " - + "to node [" - + relocationTargetNode.getName() - + "], wait until relocation has completed"; - } else { - assert shardRouting.state() == ShardRoutingState.INITIALIZING; - explanation = "the shard is in the process of initializing on node [" - + currentNode.getName() - + "], " - + "wait until initialization has completed"; - } - builder.field("explanation", explanation); + assert shardRouting.state() == ShardRoutingState.INITIALIZING; + explanation = "the shard is in the process of initializing on node [" + + currentNode.getName() + + "], " + + "wait until initialization has completed"; } + return Iterators.single((builder, p) -> builder.field("explanation", explanation)); } - builder.endObject(); // end wrapping object - return builder; } private static XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index 5597f3359a1c7..417d6eb14d0b0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -88,7 +89,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (Objects.nonNull(allocationDecision)) { builder.startObject(NODE_ALLOCATION_DECISION_KEY); { - allocationDecision.toXContent(builder, params); + // This field might be huge, TODO add chunking support here + ChunkedToXContent.wrapAsToXContent(allocationDecision).toXContent(builder, params); } builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java index 11596f9420709..e81904021d3cf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java @@ -11,14 +11,19 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -26,7 +31,7 @@ /** * An abstract class for representing various types of allocation decisions. */ -public abstract class AbstractAllocationDecision implements ToXContentFragment, Writeable { +public abstract class AbstractAllocationDecision implements ChunkedToXContentObject, Writeable { @Nullable protected final DiscoveryNode targetNode; @@ -127,22 +132,19 @@ public static List sortNodeDecisions(List nodeDecisions, XContentBuilder builder, Params params) - throws IOException { - - if (nodeDecisions != null && nodeDecisions.isEmpty() == false) { - builder.startArray("node_allocation_decisions"); - { - for (NodeAllocationResult explanation : nodeDecisions) { - explanation.toXContent(builder, params); - } - } - builder.endArray(); + public static Iterator nodeDecisionsToXContentChunked(List nodeDecisions) { + if (nodeDecisions == null || nodeDecisions.isEmpty()) { + return Collections.emptyIterator(); } - return builder; + + return Iterators.concat( + ChunkedToXContentHelper.startArray("node_allocation_decisions"), + nodeDecisions.iterator(), + ChunkedToXContentHelper.endArray() + ); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java index 1d7e0e4a1edbb..d7bcacd3a0cde 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java @@ -12,15 +12,17 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.Collections; import java.util.EnumMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -291,28 +293,33 @@ && getNodeDecisions().stream() } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public Iterator toXContentChunked(ToXContent.Params 
params) { checkDecisionState(); - builder.field("can_allocate", getAllocationDecision()); - builder.field("allocate_explanation", getExplanation()); - if (targetNode != null) { - builder.startObject("target_node"); - discoveryNodeToXContent(targetNode, true, builder); - builder.endObject(); - } - if (allocationId != null) { - builder.field("allocation_id", allocationId); - } - if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) { - builder.humanReadableField( - "configured_delay_in_millis", - "configured_delay", - TimeValue.timeValueMillis(configuredDelayInMillis) - ); - builder.humanReadableField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayInMillis)); - } - nodeDecisionsToXContent(nodeDecisions, builder, params); - return builder; + return Iterators.concat(Iterators.single((builder, p) -> { + builder.field("can_allocate", getAllocationDecision()); + builder.field("allocate_explanation", getExplanation()); + if (targetNode != null) { + builder.startObject("target_node"); + discoveryNodeToXContent(targetNode, true, builder); + builder.endObject(); + } + if (allocationId != null) { + builder.field("allocation_id", allocationId); + } + if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) { + builder.humanReadableField( + "configured_delay_in_millis", + "configured_delay", + TimeValue.timeValueMillis(configuredDelayInMillis) + ); + builder.humanReadableField( + "remaining_delay_in_millis", + "remaining_delay", + TimeValue.timeValueMillis(remainingDelayInMillis) + ); + } + return builder; + }), nodeDecisionsToXContentChunked(nodeDecisions)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java index 69eacb70f057b..3819805316f26 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java @@ -11,12 +11,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Objects; @@ -282,37 +284,38 @@ public String getExplanation() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public Iterator toXContentChunked(ToXContent.Params params) { checkDecisionState(); - if (targetNode != null) { - builder.startObject("target_node"); - discoveryNodeToXContent(targetNode, true, builder); - builder.endObject(); - } - builder.field("can_remain_on_current_node", canRemain() ? 
"yes" : "no"); - if (canRemain() == false && canRemainDecision.getDecisions().isEmpty() == false) { - builder.startArray("can_remain_decisions"); - canRemainDecision.toXContent(builder, params); - builder.endArray(); - } - if (clusterRebalanceDecision != null) { - AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type()); - builder.field("can_rebalance_cluster", rebalanceDecision); - if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) { - builder.startArray("can_rebalance_cluster_decisions"); - clusterRebalanceDecision.toXContent(builder, params); + return Iterators.concat(Iterators.single((builder, p) -> { + if (targetNode != null) { + builder.startObject("target_node"); + discoveryNodeToXContent(targetNode, true, builder); + builder.endObject(); + } + builder.field("can_remain_on_current_node", canRemain() ? "yes" : "no"); + if (canRemain() == false && canRemainDecision.getDecisions().isEmpty() == false) { + builder.startArray("can_remain_decisions"); + canRemainDecision.toXContent(builder, params); builder.endArray(); } - } - if (clusterRebalanceDecision != null) { - builder.field("can_rebalance_to_other_node", allocationDecision); - builder.field("rebalance_explanation", getExplanation()); - } else { - builder.field("can_move_to_other_node", forceMove() ? "yes" : "no"); - builder.field("move_explanation", getExplanation()); - } - nodeDecisionsToXContent(nodeDecisions, builder, params); - return builder; + if (clusterRebalanceDecision != null) { + AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type()); + builder.field("can_rebalance_cluster", rebalanceDecision); + if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) { + builder.startArray("can_rebalance_cluster_decisions"); + clusterRebalanceDecision.toXContent(builder, params); + builder.endArray(); + } + } + if (clusterRebalanceDecision != null) { + builder.field("can_rebalance_to_other_node", allocationDecision); + builder.field("rebalance_explanation", getExplanation()); + } else { + builder.field("can_move_to_other_node", forceMove() ? "yes" : "no"); + builder.field("move_explanation", getExplanation()); + } + return builder; + }), nodeDecisionsToXContentChunked(nodeDecisions)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java index 30d033ba5d431..7f184386ec367 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java @@ -8,13 +8,16 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ToXContentFragment; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; /** * Represents the decision taken for the allocation of a single shard. 
If @@ -29,7 +32,7 @@ * then both {@link #getAllocateDecision()} and {@link #getMoveDecision()} will return * objects whose {@code isDecisionTaken()} method returns {@code false}. */ -public final class ShardAllocationDecision implements ToXContentFragment, Writeable { +public final class ShardAllocationDecision implements ChunkedToXContentObject, Writeable { public static final ShardAllocationDecision NOT_TAKEN = new ShardAllocationDecision( AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN @@ -82,14 +85,11 @@ public MoveDecision getMoveDecision() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (allocateDecision.isDecisionTaken()) { - allocateDecision.toXContent(builder, params); - } - if (moveDecision.isDecisionTaken()) { - moveDecision.toXContent(builder, params); - } - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + allocateDecision.isDecisionTaken() ? allocateDecision.toXContentChunked(params) : Collections.emptyIterator(), + moveDecision.isDecisionTaken() ? moveDecision.toXContentChunked(params) : Collections.emptyIterator() + ); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java index aaff9e249af0f..ce8af443a9789 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -82,6 +82,12 @@ private static Iterator map(String name, Map map, Fun return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent)); } + /** + * Creates an Iterator of a single ToXContent object that serializes all the given 'contents' ToXContent objects into a single chunk. + * + * @param contents ToXContent objects supporting toXContent() calls. + * @return Iterator of a single ToXContent object serializing all the ToXContent "contents". + */ public static Iterator singleChunk(ToXContent... 
contents) { return Iterators.single((builder, params) -> { for (ToXContent content : contents) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 88fd1b610ba1d..896c341953e73 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -9,17 +9,12 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; -import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -63,14 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); - return channel -> client.admin() - .cluster() - .allocationExplain(req, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws IOException { - response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS); - return new RestResponse(RestStatus.OK, builder); - } - }); + return channel -> client.admin().cluster().allocationExplain(req, new RestChunkedToXContentListener<>(channel)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index 70df808ae1f08..f68e83e13496c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -88,7 +89,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing assertFalse(cae.getShardAllocationDecision().getAllocateDecision().isDecisionTaken()); assertFalse(cae.getShardAllocationDecision().getMoveDecision().isDecisionTaken()); XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + 
ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); String explanation; if (shardRoutingState == ShardRoutingState.RELOCATING) { explanation = "the shard is in the process of relocating from node [] to node [], wait until " + "relocation has completed"; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java index 4243a943c6761..6ade8fc184ed9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java @@ -23,9 +23,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -81,8 +83,10 @@ public void testExplanationSerialization() throws Exception { public void testExplanationToXContent() throws Exception { ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true, true); + AbstractChunkedSerializingTestCase.assertChunkCount(cae, ignored -> 3); + XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals(XContentHelper.stripWhitespace(Strings.format(""" { "index": "idx", @@ -105,8 +109,9 @@ public void testExplanationToXContent() throws Exception { public void testRandomShardExplanationToXContent() throws Exception { ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true, false); + AbstractChunkedSerializingTestCase.assertChunkCount(cae, ignored -> 3); XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); final String actual = Strings.toString(builder); assertThat( actual, From 5b17f1b99b105ec476312f8997c5ff0abc9b1be8 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 19 Sep 2023 14:01:16 -0700 Subject: [PATCH 15/27] Use ESQL threadpool for inactive exchange reaper (#99681) Reaping inactive exchanges should be lightweight and can be executed using the scheduler threadpool. However, since the scheduler is a single-core threadpool used by other critical tasks like periodic refresh, flushing, and translog sync, it's safer to run this task using the ESQL threadpool to prevent potential issues. Additionally, considering that this task runs every 5 minutes, the overhead is trivial. 
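As a hedged aside on the reasoning above: the point is that the single scheduler thread only fires the trigger, while the scan itself runs on a pooled worker. A minimal JDK-only sketch of that hand-off, with illustrative names (ReaperSketch, reapInactiveSinks) rather than the Elasticsearch classes changed below:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class ReaperSketch {
    // single-threaded scheduler, analogous to the shared scheduler pool: it must stay responsive
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // worker pool standing in for the ESQL threadpool
    private final ExecutorService workers = Executors.newFixedThreadPool(4);

    void start() {
        // the scheduler thread only enqueues the task; the scan runs on a worker thread,
        // so a slow reap cannot hold up refresh/flush/translog-sync style periodic work
        scheduler.scheduleAtFixedRate(() -> workers.execute(this::reapInactiveSinks), 5, 5, TimeUnit.MINUTES);
    }

    private void reapInactiveSinks() {
        // walk the registered sinks and close those idle longer than the configured interval
    }
}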
Closes #99384 --- .../compute/operator/exchange/ExchangeService.java | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 6db4a8c4fe37d..08e793e43c612 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -34,6 +33,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.Transports; import java.io.IOException; import java.util.Map; @@ -73,7 +73,7 @@ public ExchangeService(Settings settings, ThreadPool threadPool, String executor this.requestExecutorName = executorName; this.responseExecutor = threadPool.executor(executorName); final var inactiveInterval = settings.getAsTime(INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMinutes(5)); - this.inactiveSinksReaper = new InactiveSinksReaper(LOGGER, threadPool, inactiveInterval); + this.inactiveSinksReaper = new InactiveSinksReaper(LOGGER, threadPool, this.responseExecutor, inactiveInterval); } public void registerTransportHandler(TransportService transportService) { @@ -211,8 +211,8 @@ public void messageReceived(ExchangeRequest request, TransportChannel channel, T } private final class InactiveSinksReaper extends AbstractAsyncTask { - InactiveSinksReaper(Logger logger, ThreadPool threadPool, TimeValue interval) { - super(logger, threadPool, EsExecutors.DIRECT_EXECUTOR_SERVICE, interval, true); + InactiveSinksReaper(Logger logger, ThreadPool threadPool, Executor executor, TimeValue interval) { + super(logger, threadPool, executor, interval, true); rescheduleIfNecessary(); } @@ -224,6 +224,8 @@ protected boolean mustReschedule() { @Override protected void runInternal() { + assert Transports.assertNotTransportThread("reaping inactive exchanges can be expensive"); + assert ThreadPool.assertNotScheduleThread("reaping inactive exchanges can be expensive"); final TimeValue maxInterval = getInterval(); final long nowInMillis = threadPool.relativeTimeInMillis(); for (Map.Entry e : sinks.entrySet()) { From 1fd17b8db85cdb3174952cfafe6c6466f91044d7 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Tue, 19 Sep 2023 23:31:07 -0400 Subject: [PATCH 16/27] Use mappings version to retrieve system index mappings at creation time (#99555) When creating system indices, we need to use mappings versions instead of Version. Now that these version numbers are available in cluster state (#99307), we can use them in TransportCreateIndexAction and AutoCreateAction. There is also a slight change in logic. Previously, we retrieved a descriptor that matched the minimum node version for "non-client" nodes, which in practice meant master-eligible and data nodes. 
We're changing that logic here because nodes are allowed to join the cluster if they match the minimum system index mappings for the cluster, regardless of node type. Here, we make a more conservative check so that we don't upgrade mappings past what any potential node could handle. --- docs/changelog/99555.yaml | 5 ++ .../indices/create/AutoCreateAction.java | 4 +- .../create/TransportCreateIndexAction.java | 4 +- .../indices/SystemIndexDescriptor.java | 42 ++++++++++++++ .../TransportCreateIndexActionTests.java | 39 ++++++++++++- .../indices/SystemIndexDescriptorTests.java | 55 ++++++++++++------- 6 files changed, 122 insertions(+), 27 deletions(-) create mode 100644 docs/changelog/99555.yaml diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml new file mode 100644 index 0000000000000..5e53e8782e08c --- /dev/null +++ b/docs/changelog/99555.yaml @@ -0,0 +1,5 @@ +pr: 99555 +summary: Use mappings version to retrieve system index mappings at creation time +area: Infra/Core +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 913c16fa33c46..c62b689d58e78 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -283,10 +283,10 @@ ClusterState execute( if (isManagedSystemIndex) { final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( - currentState.nodes().getSmallestNonClientNodeVersion() + currentState.getMinSystemIndexMappingVersions().get(mainDescriptor.getPrimaryIndex()) ); if (descriptor == null) { - final String message = mainDescriptor.getMinimumNodeVersionMessage("auto-create index"); + final String message = mainDescriptor.getMinimumMappingsVersionMessage("auto-create index"); logger.warn(message); throw new IllegalStateException(message); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 3e72500cc016b..3deb70df92d88 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -132,10 +132,10 @@ protected void masterOperation( // the index to the latest settings. 
if (isManagedSystemIndex && Strings.isNullOrEmpty(request.origin())) { final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( - state.nodes().getSmallestNonClientNodeVersion() + state.getMinSystemIndexMappingVersions().get(mainDescriptor.getPrimaryIndex()) ); if (descriptor == null) { - final String message = mainDescriptor.getMinimumNodeVersionMessage("create index"); + final String message = mainDescriptor.getMinimumMappingsVersionMessage("create index"); logger.warn(message); listener.onFailure(new IllegalStateException(message)); return; diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java index d6441a2920f43..98cea47a94a5d 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java @@ -550,6 +550,27 @@ public MappingsVersion getMappingsVersion() { * @param cause the action being attempted that triggered the check. Used in the error message. * @return the standardized error message */ + public String getMinimumMappingsVersionMessage(String cause) { + Objects.requireNonNull(cause); + final MappingsVersion actualMinimumMappingsVersion = priorSystemIndexDescriptors.isEmpty() + ? getMappingsVersion() + : priorSystemIndexDescriptors.get(priorSystemIndexDescriptors.size() - 1).mappingsVersion; + return Strings.format( + "[%s] failed - system index [%s] requires all data and master nodes to have mappings versions at least of version [%s]", + cause, + this.getPrimaryIndex(), + actualMinimumMappingsVersion + ); + } + + /** + * Gets a standardized message when the node contains a data or master node whose version is less + * than that of the minimum supported version of this descriptor and its prior descriptors. + * + * @param cause the action being attempted that triggered the check. Used in the error message. + * @return the standardized error message + */ + @Deprecated public String getMinimumNodeVersionMessage(String cause) { Objects.requireNonNull(cause); final Version actualMinimumVersion = priorSystemIndexDescriptors.isEmpty() @@ -572,6 +593,7 @@ public String getMinimumNodeVersionMessage(String cause) { * @return null if the lowest node version is lower than the minimum version in this descriptor, * or the appropriate descriptor if the supplied version is acceptable. */ + @Deprecated public SystemIndexDescriptor getDescriptorCompatibleWith(Version version) { if (minimumNodeVersion.onOrBefore(version)) { return this; @@ -584,6 +606,26 @@ public SystemIndexDescriptor getDescriptorCompatibleWith(Version version) { return null; } + /** + * Finds the descriptor that can be used within this cluster, by comparing the supplied minimum + * node version to this descriptor's minimum version and the prior descriptors minimum version. + * + * @param version the lower node version in the cluster + * @return null if the lowest node version is lower than the minimum version in this descriptor, + * or the appropriate descriptor if the supplied version is acceptable. 
+ */ + public SystemIndexDescriptor getDescriptorCompatibleWith(MappingsVersion version) { + if (Objects.requireNonNull(version).version() >= mappingsVersion.version()) { + return this; + } + for (SystemIndexDescriptor prior : priorSystemIndexDescriptors) { + if (version.version() >= prior.mappingsVersion.version()) { + return prior; + } + } + return null; + } + /** * @return The names of thread pools that should be used for operations on this * system index. diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java index d68641d04dd74..a53755bfcec7b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.create; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActionFilters; @@ -17,8 +18,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -30,8 +37,11 @@ import org.junit.Before; import org.mockito.ArgumentCaptor; +import java.net.InetAddress; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN; import static org.hamcrest.Matchers.equalTo; @@ -42,13 +52,36 @@ public class TransportCreateIndexActionTests extends ESTestCase { + private static final String UNMANAGED_SYSTEM_INDEX_NAME = ".my-system"; + private static final String MANAGED_SYSTEM_INDEX_NAME = ".my-managed"; + private static final String SYSTEM_ALIAS_NAME = ".my-alias"; private static final ClusterState CLUSTER_STATE = ClusterState.builder(new ClusterName("test")) .metadata(Metadata.builder().build()) + .nodes( + DiscoveryNodes.builder() + .add( + new DiscoveryNode( + "node-1", + "node-1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Map.of(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + VersionInformation.CURRENT + ) + ) + .build() + ) + .compatibilityVersions( + Map.of( + "node-1", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(MANAGED_SYSTEM_INDEX_NAME + "-primary", new SystemIndexDescriptor.MappingsVersion(1, 1)) + ) + ) + ) .build(); - private static final String UNMANAGED_SYSTEM_INDEX_NAME = ".my-system"; - private static final String MANAGED_SYSTEM_INDEX_NAME = ".my-managed"; - private static final String SYSTEM_ALIAS_NAME = ".my-alias"; private static final 
SystemIndices SYSTEM_INDICES = new SystemIndices( List.of( new SystemIndices.Feature( diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java index 1a372de6129a3..92d51b80326ae 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.indices.SystemIndexDescriptor.Type; @@ -19,9 +20,9 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.util.List; -import java.util.Locale; import java.util.Map; +import static org.elasticsearch.indices.SystemIndexDescriptor.VERSION_META_KEY; import static org.elasticsearch.indices.SystemIndexDescriptor.findDynamicMapping; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -29,16 +30,26 @@ public class SystemIndexDescriptorTests extends ESTestCase { - private static final String MAPPINGS = String.format(Locale.ROOT, """ + private static final int TEST_MAPPINGS_VERSION = 10; + private static final int TEST_MAPPINGS_PRIOR_VERSION = 5; + private static final int TEST_MAPPINGS_NONEXISTENT_VERSION = 2; + + private static final String MAPPINGS_FORMAT_STRING = """ { "_doc": { "_meta": { "version": "7.4.0", - "%s": 1 + "%s": %d } } } - """, SystemIndexDescriptor.VERSION_META_KEY); + """; + + private static final String MAPPINGS = Strings.format( + MAPPINGS_FORMAT_STRING, + SystemIndexDescriptor.VERSION_META_KEY, + TEST_MAPPINGS_VERSION + ); /** * Tests the various validation rules that are applied when creating a new system index descriptor. 
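Before the testGetDescriptorCompatibleWith hunks below, it may help to restate the selection rule they exercise: use this descriptor if the cluster-wide minimum mappings version satisfies it, otherwise the newest prior descriptor that is still satisfied, otherwise none. A self-contained sketch under that reading, where Descriptor and MappingsVersion are hypothetical stand-ins for the real SystemIndexDescriptor types:

import java.util.List;
import java.util.Optional;

record MappingsVersion(int version) {}

record Descriptor(MappingsVersion mappingsVersion, List<Descriptor> priors) {
    // priors are expected newest-first, mirroring how prior descriptors are consulted in order
    Optional<Descriptor> compatibleWith(MappingsVersion clusterMinimum) {
        if (clusterMinimum.version() >= mappingsVersion.version()) {
            return Optional.of(this);
        }
        for (Descriptor prior : priors) {
            if (clusterMinimum.version() >= prior.mappingsVersion().version()) {
                return Optional.of(prior);
            }
        }
        return Optional.empty(); // cluster minimum predates every descriptor in the chain
    }
}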
@@ -261,7 +272,7 @@ public void testGetDescriptorCompatibleWith() { .setAliasName(".system") .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) - .setMappings(MAPPINGS) + .setMappings(Strings.format(MAPPINGS_FORMAT_STRING, VERSION_META_KEY, TEST_MAPPINGS_PRIOR_VERSION)) .setVersionMetaKey("version") .setOrigin("system") .setMinimumNodeVersion(Version.V_7_0_0) @@ -282,7 +293,11 @@ public void testGetDescriptorCompatibleWith() { SystemIndexDescriptor compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT); assertSame(descriptor, compat); + compat = descriptor.getDescriptorCompatibleWith(descriptor.getMappingsVersion()); + assertSame(descriptor, compat); + assertNull(descriptor.getDescriptorCompatibleWith(Version.fromString("6.8.0"))); + assertNull(descriptor.getDescriptorCompatibleWith(new SystemIndexDescriptor.MappingsVersion(TEST_MAPPINGS_NONEXISTENT_VERSION, 1))); compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT.minimumCompatibilityVersion()); assertSame(descriptor, compat); @@ -291,10 +306,22 @@ public void testGetDescriptorCompatibleWith() { compat = descriptor.getDescriptorCompatibleWith(priorToMin); assertSame(prior, compat); + SystemIndexDescriptor.MappingsVersion priorToMinMappingsVersion = new SystemIndexDescriptor.MappingsVersion( + TEST_MAPPINGS_PRIOR_VERSION, + 1 + ); + compat = descriptor.getDescriptorCompatibleWith(priorToMinMappingsVersion); + assertSame(prior, compat); + compat = descriptor.getDescriptorCompatibleWith( VersionUtils.randomVersionBetween(random(), prior.getMinimumNodeVersion(), priorToMin) ); assertSame(prior, compat); + + compat = descriptor.getDescriptorCompatibleWith( + new SystemIndexDescriptor.MappingsVersion(randomIntBetween(TEST_MAPPINGS_PRIOR_VERSION, TEST_MAPPINGS_VERSION - 1), 1) + ); + assertSame(prior, compat); } public void testSystemIndicesMustBeHidden() { @@ -368,7 +395,7 @@ public void testUnmanagedIndexMappingsVersion() { // test mapping versions can't be negative public void testNegativeMappingsVersion() { int negativeVersion = randomIntBetween(Integer.MIN_VALUE, -1); - String mappings = String.format(Locale.ROOT, """ + String mappings = Strings.format(""" { "_doc": { "_meta": { @@ -415,20 +442,8 @@ public void testHashesIgnoreMappingMetadata() { } """; - String mappings1 = String.format( - Locale.ROOT, - mappingFormatString, - "8.9.0", - SystemIndexDescriptor.VERSION_META_KEY, - randomIntBetween(1, 10) - ); - String mappings2 = String.format( - Locale.ROOT, - mappingFormatString, - "8.10.0", - SystemIndexDescriptor.VERSION_META_KEY, - randomIntBetween(11, 20) - ); + String mappings1 = Strings.format(mappingFormatString, "8.9.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(1, 10)); + String mappings2 = Strings.format(mappingFormatString, "8.10.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(11, 20)); SystemIndexDescriptor descriptor1 = priorSystemIndexDescriptorBuilder().setMappings(mappings1).build(); SystemIndexDescriptor descriptor2 = priorSystemIndexDescriptorBuilder().setMappings(mappings2).build(); From 406a1897c6f126a70f23e341c737304526c41485 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 20 Sep 2023 07:53:39 +0200 Subject: [PATCH 17/27] Mute SignificantTermsSignificanceScoreIT#testScriptScore (#99691) relates https://github.com/elastic/elasticsearch/issues/99690 --- .../aggregations/bucket/SignificantTermsSignificanceScoreIT.java | 1 + 1 file changed, 1 insertion(+) diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index c8d89785fc4af..24fd711d18a72 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -472,6 +472,7 @@ private void indexEqualTestData() throws ExecutionException, InterruptedExceptio indexRandom(true, false, indexRequestBuilders); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99690") public void testScriptScore() throws ExecutionException, InterruptedException, IOException { String type = randomBoolean() ? "text" : "long"; indexRandomFrequencies01(type); From ad3cc7c70b3498ec193a40529bebd06a3bf48853 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Wed, 20 Sep 2023 09:10:09 +0100 Subject: [PATCH 18/27] [DSL] Atomic replace the source with the downsample index, deleting the source index (#99610) When downsampling via the data stream lifecycle we replace the source index in the data stream with its downsampled index, and then delete the source index. Before this PR these were two steps. This changes so the replacement is atomically executed i.e. the downsample index is brought into the data stream and the source index is deleted. This also allows the downsampling operation to be executed on system data streams (which the previous execution model prevented) --- .../datastreams/DataStreamsPlugin.java | 3 +- .../DeleteDataStreamTransportAction.java | 14 +- .../lifecycle/DataStreamLifecycleService.java | 49 +++---- ...teSourceAndAddDownsampleIndexExecutor.java | 59 ++++++++ ... 
=> DeleteSourceAndAddDownsampleToDS.java} | 111 ++++++++------- ...aceBackingWithDownsampleIndexExecutor.java | 108 -------------- .../DataStreamGetWriteIndexTests.java | 1 - .../DeleteDataStreamTransportActionTests.java | 36 +---- .../DataStreamLifecycleServiceTests.java | 77 ++++------ ...rceAndAddDownsampleIndexExecutorTests.java | 56 ++++++++ ...eleteSourceAndAddDownsampleToDSTests.java} | 90 ++++-------- ...ckingWithDownsampleIndexExecutorTests.java | 104 -------------- .../elasticsearch/cluster/ClusterModule.java | 4 - .../metadata/MetadataDataStreamsService.java | 9 -- .../metadata/MetadataDeleteIndexService.java | 4 +- .../metadata/MetadataIndexAliasesService.java | 8 +- .../indices/SystemDataStreamDescriptor.java | 13 -- .../java/org/elasticsearch/node/Node.java | 1 - .../snapshots/RestoreService.java | 13 +- .../MetadataDeleteIndexServiceTests.java | 26 +++- .../MetadataIndexAliasesServiceTests.java | 59 ++++---- .../snapshots/SnapshotResiliencyTests.java | 1 - .../metadata/DataStreamTestHelper.java | 2 +- .../core/security/user/InternalUsers.java | 5 +- .../security/user/InternalUsersTests.java | 4 +- ...StreamLifecycleDownsamplingSecurityIT.java | 133 +++--------------- 26 files changed, 358 insertions(+), 632 deletions(-) create mode 100644 modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java rename modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/{ReplaceSourceWithDownsampleIndexTask.java => DeleteSourceAndAddDownsampleToDS.java} (65%) delete mode 100644 modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java create mode 100644 modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java rename modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/{ReplaceSourceWithDownsampleIndexTaskTests.java => DeleteSourceAndAddDownsampleToDSTests.java} (78%) delete mode 100644 modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 313a6dd459668..6cccfb2a8d9be 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -188,7 +188,8 @@ public Collection createComponents( getClock(), threadPool, threadPool::absoluteTimeInMillis, - errorStoreInitialisationService.get() + errorStoreInitialisationService.get(), + allocationService ) ); dataLifecycleInitialisationService.get().init(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java index 75713c85df4a1..904b918fe5ae4 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemIndices; @@ -48,7 +49,6 @@ public class DeleteDataStreamTransportAction extends AcknowledgedTransportMaster private static final Logger LOGGER = LogManager.getLogger(DeleteDataStreamTransportAction.class); - private final MetadataDeleteIndexService deleteIndexService; private final SystemIndices systemIndices; @Inject @@ -58,7 +58,6 @@ public DeleteDataStreamTransportAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - MetadataDeleteIndexService deleteIndexService, SystemIndices systemIndices ) { super( @@ -71,7 +70,6 @@ public DeleteDataStreamTransportAction( indexNameExpressionResolver, ThreadPool.Names.SAME ); - this.deleteIndexService = deleteIndexService; this.systemIndices = systemIndices; } @@ -100,11 +98,11 @@ public void onFailure(Exception e) { @Override public ClusterState execute(ClusterState currentState) { return removeDataStream( - deleteIndexService, indexNameExpressionResolver, currentState, request, - ds -> systemIndices.validateDataStreamAccess(ds, threadPool.getThreadContext()) + ds -> systemIndices.validateDataStreamAccess(ds, threadPool.getThreadContext()), + clusterService.getSettings() ); } @@ -122,11 +120,11 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String } static ClusterState removeDataStream( - MetadataDeleteIndexService deleteIndexService, IndexNameExpressionResolver indexNameExpressionResolver, ClusterState currentState, DeleteDataStreamAction.Request request, - Consumer systemDataStreamAccessValidator + Consumer systemDataStreamAccessValidator, + Settings settings ) { List names = getDataStreamNames(indexNameExpressionResolver, currentState, request.getNames(), request.indicesOptions()); Set dataStreams = new HashSet<>(names); @@ -168,7 +166,7 @@ static ClusterState removeDataStream( metadata.removeDataStream(ds); } currentState = ClusterState.builder(currentState).metadata(metadata).build(); - return deleteIndexService.deleteIndices(currentState, backingIndicesToRemove); + return MetadataDeleteIndexService.deleteIndices(currentState, backingIndicesToRemove, settings); } @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 3311d064b4816..3f07e9ea478df 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; @@ -52,8 +53,8 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import 
org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceBackingWithDownsampleIndexExecutor; -import org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask; +import org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleIndexExecutor; +import org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleToDS; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -80,7 +81,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.STARTED; -import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.SUCCESS; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_DOWNSAMPLE_STATUS; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; @@ -133,7 +133,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab private final ClusterService clusterService; private final ThreadPool threadPool; final ResultDeduplicator transportActionsDeduplicator; - final ResultDeduplicator clusterStateChangesDeduplicator; + final ResultDeduplicator clusterStateChangesDeduplicator; private final LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -143,7 +143,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab private SchedulerEngine.Job scheduledJob; private final SetOnce scheduler = new SetOnce<>(); private final MasterServiceTaskQueue forceMergeClusterStateUpdateTaskQueue; - private final MasterServiceTaskQueue swapSourceWithDownsampleIndexQueue; + private final MasterServiceTaskQueue swapSourceWithDownsampleIndexQueue; private volatile ByteSizeValue targetMergePolicyFloorSegment; private volatile int targetMergePolicyFactor; @@ -168,7 +168,8 @@ public DataStreamLifecycleService( Clock clock, ThreadPool threadPool, LongSupplier nowSupplier, - DataStreamLifecycleErrorStore errorStore + DataStreamLifecycleErrorStore errorStore, + AllocationService allocationService ) { this.settings = settings; this.client = client; @@ -192,8 +193,8 @@ public DataStreamLifecycleService( ); this.swapSourceWithDownsampleIndexQueue = clusterService.createTaskQueue( "data-stream-lifecycle-swap-source-with-downsample", - Priority.NORMAL, - new ReplaceBackingWithDownsampleIndexExecutor(client) + Priority.URGENT, // urgent priority as this deletes indices + new DeleteSourceAndAddDownsampleIndexExecutor(allocationService) ); } @@ -394,7 +395,6 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L } String indexName = index.getName(); - IndexMetadata.DownsampleTaskStatus backingIndexDownsamplingStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndexMeta.getSettings()); String downsamplingSourceIndex = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); // if the current index is not a downsample we want to mark the index as read-only before proceeding with downsampling @@ -402,21 +402,7 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L && state.blocks().indexBlocked(ClusterBlockLevel.WRITE, indexName) == false) { affectedIndices.add(index); addIndexBlockOnce(indexName); - } else if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) - && 
backingIndexDownsamplingStatus.equals(SUCCESS)) { - // if the backing index is a downsample index itself, let's check if its source index still exists as we must delete it - IndexMetadata downsampleSourceIndex = metadata.index(downsamplingSourceIndex); - if (downsampleSourceIndex != null) { - // we mark the backing index as affected as we don't want subsequent operations that might change its state to - // be performed, as we might lose the way to identify that we must delete its replacement source index - affectedIndices.add(index); - // delete downsampling source index (that's not part of the data stream anymore) before doing any more - // downsampling - deleteIndexOnce(downsamplingSourceIndex, "replacement with its downsampled index in the data stream"); - } - } - - if (affectedIndices.contains(index) == false) { + } else { // we're not performing any operation for this index which means that it: // - has matching downsample rounds // - is read-only @@ -582,7 +568,10 @@ private Set evaluateDownsampleStatus( */ private void replaceBackingIndexWithDownsampleIndexOnce(DataStream dataStream, String backingIndexName, String downsampleIndexName) { clusterStateChangesDeduplicator.executeOnce( - new ReplaceSourceWithDownsampleIndexTask(dataStream.getName(), backingIndexName, downsampleIndexName, null), + // we use a String key here as otherwise it's ... awkward as we have to create the DeleteSourceAndAddDownsampleToDS as the + // key _without_ a listener (passing in null) and then below we create it again with the `reqListener`. We're using a String + // as it seems to be clearer. + "dsl-replace-" + dataStream.getName() + "-" + backingIndexName + "-" + downsampleIndexName, new ErrorRecordingActionListener( backingIndexName, errorStore, @@ -601,8 +590,14 @@ private void replaceBackingIndexWithDownsampleIndexOnce(DataStream dataStream, S dataStream ); swapSourceWithDownsampleIndexQueue.submitTask( - "data-stream-lifecycle-replace-source[" + backingIndexName + "]-with-[" + downsampleIndexName + "]", - new ReplaceSourceWithDownsampleIndexTask(dataStream.getName(), backingIndexName, downsampleIndexName, reqListener), + "data-stream-lifecycle-delete-source[" + backingIndexName + "]-add-to-datastream-[" + downsampleIndexName + "]", + new DeleteSourceAndAddDownsampleToDS( + settings, + dataStream.getName(), + backingIndexName, + downsampleIndexName, + reqListener + ), null ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java new file mode 100644 index 0000000000000..bf31146d711b2 --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams.lifecycle.downsampling; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.core.Tuple; + +import static org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionListener.rerouteCompletionIsNotRequired; + +/** + * Cluster service task (batched) executor that deletes the source index and adds its downsample index to the data stream. + */ +public class DeleteSourceAndAddDownsampleIndexExecutor extends SimpleBatchedExecutor { + private static final Logger LOGGER = LogManager.getLogger(DeleteSourceAndAddDownsampleToDS.class); + private final AllocationService allocationService; + + public DeleteSourceAndAddDownsampleIndexExecutor(AllocationService allocationService) { + this.allocationService = allocationService; + } + + @Override + public Tuple executeTask(DeleteSourceAndAddDownsampleToDS task, ClusterState clusterState) throws Exception { + return Tuple.tuple(task.execute(clusterState), null); + } + + @Override + public void taskSucceeded(DeleteSourceAndAddDownsampleToDS task, Void unused) { + LOGGER.trace( + "Updated cluster state and replaced index [{}] with index [{}] in data stream [{}]. Index [{}] was deleted", + task.getSourceBackingIndex(), + task.getDownsampleIndex(), + task.getDataStreamName(), + task.getSourceBackingIndex() + ); + task.getListener().onResponse(null); + } + + @Override + public ClusterState afterBatchExecution(ClusterState clusterState, boolean clusterStateChanged) { + if (clusterStateChanged) { + return allocationService.reroute( + clusterState, + "deleted indices", + rerouteCompletionIsNotRequired() // it is not required to balance shard to report index deletion success + ); + } + return clusterState; + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java similarity index 65% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java rename to modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java index 70cf57456e099..3e49499740349 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -24,27 +25,31 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; /** - * Cluster state task that replaces a source index in a data stream with its downsample index. 
+ * Cluster state task that deletes a source index in a data stream and adds its downsample index. * In the process it will configure the origination date for the downsample index (so it can * have a correct generation time). */ -public class ReplaceSourceWithDownsampleIndexTask implements ClusterStateTaskListener { - private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); +public class DeleteSourceAndAddDownsampleToDS implements ClusterStateTaskListener { + private static final Logger LOGGER = LogManager.getLogger(DeleteSourceAndAddDownsampleToDS.class); + private final Settings settings; private ActionListener listener; private final String dataStreamName; private final String sourceBackingIndex; private final String downsampleIndex; - public ReplaceSourceWithDownsampleIndexTask( + public DeleteSourceAndAddDownsampleToDS( + Settings settings, String dataStreamName, String sourceBackingIndex, String downsampleIndex, ActionListener listener ) { + this.settings = settings; this.dataStreamName = dataStreamName; this.sourceBackingIndex = sourceBackingIndex; this.downsampleIndex = downsampleIndex; @@ -53,17 +58,16 @@ public ReplaceSourceWithDownsampleIndexTask( ClusterState execute(ClusterState state) { LOGGER.trace( - "Updating cluster state to replace index [{}] with [{}] in data stream [{}]", + "Updating cluster state to replace and delete index [{}] with [{}] in data stream [{}]", sourceBackingIndex, downsampleIndex, dataStreamName ); - IndexAbstraction sourceIndexAbstraction = state.metadata().getIndicesLookup().get(sourceBackingIndex); IndexMetadata downsampleIndexMeta = state.metadata().index(downsampleIndex); if (downsampleIndexMeta == null) { // the downsample index doesn't exist anymore so nothing to replace here LOGGER.trace( - "Received request replace index [{}] with [{}] in data stream [{}] but the replacement index [{}] doesn't exist." + "Received request to replace index [{}] with [{}] in data stream [{}] but the replacement index [{}] doesn't exist." 
+ "Nothing to do here.", sourceBackingIndex, downsampleIndex, @@ -72,9 +76,9 @@ ClusterState execute(ClusterState state) { ); return state; } - IndexMetadata sourceIndexMeta = state.metadata().index(sourceBackingIndex); - DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); + IndexAbstraction sourceIndexAbstraction = state.metadata().getIndicesLookup().get(sourceBackingIndex); if (sourceIndexAbstraction == null) { + DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); // index was deleted in the meantime, so let's check if we can make sure the downsample index ends up in the // data stream (if not already there) if (dataStream != null @@ -91,8 +95,24 @@ ClusterState execute(ClusterState state) { return ClusterState.builder(state).metadata(newMetaData).build(); } } else { - // the source index exists DataStream sourceParentDataStream = sourceIndexAbstraction.getParentDataStream(); + if (sourceParentDataStream != null && sourceParentDataStream.getWriteIndex().getName().equals(sourceBackingIndex)) { + String errorMessage = String.format( + Locale.ROOT, + "index [%s] is the write index for data stream [%s] and cannot be replaced", + sourceBackingIndex, + sourceParentDataStream.getName() + ); + throw new IllegalStateException(errorMessage); + } + + IndexMetadata sourceIndexMeta = state.metadata().index(sourceBackingIndex); + assert sourceIndexMeta != null + : "the source index abstraction exists in the indices lookup, so the index metadata must " + + "exist in the same cluster state metadata"; + // the source index exists so let's start by deleting it + state = MetadataDeleteIndexService.deleteIndices(state, Set.of(sourceIndexMeta.getIndex()), settings); + DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); if (sourceParentDataStream != null) { assert sourceParentDataStream.getName().equals(dataStreamName) : "the backing index must be part of the provided data " @@ -101,49 +121,14 @@ ClusterState execute(ClusterState state) { + "] but it is instead part of data stream [" + sourceParentDataStream.getName() + "]"; - if (sourceParentDataStream.getWriteIndex().getName().equals(sourceBackingIndex)) { - String errorMessage = String.format( - Locale.ROOT, - "index [%s] is the write index for data stream [%s] and cannot be replaced", - sourceBackingIndex, - sourceParentDataStream.getName() - ); - throw new IllegalStateException(errorMessage); - } - if (sourceIndexMeta != null) { - // both indices exist, let's copy the origination date from the source index to the downsample index - Metadata.Builder newMetaData = Metadata.builder(state.getMetadata()); - TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta); - assert generationLifecycleDate != null : "write index must never be downsampled, or replaced"; - IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState( - sourceIndexMeta, - downsampleIndexMeta, - generationLifecycleDate.millis() - ); - - newMetaData.put(updatedDownsampleMetadata, true); - // replace source with downsample - newMetaData.put(dataStream.replaceBackingIndex(sourceIndexMeta.getIndex(), downsampleIndexMeta.getIndex())); - return ClusterState.builder(state).metadata(newMetaData).build(); - } + // both indices exist, let's copy the origination date from the source index to the downsample index + return addDownsampleIndexToDataStream(state, dataStream, sourceIndexMeta, downsampleIndexMeta); } else { // the source index is not part of a data stream, so let's check if we 
can make sure the downsample index ends up in the
                // data stream
                if (dataStream != null
                    && dataStream.getIndices().stream().filter(index -> index.getName().equals(downsampleIndex)).findAny().isEmpty()) {
-                   Metadata.Builder newMetaData = Metadata.builder(state.getMetadata());
-                   TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta);
-                   assert generationLifecycleDate != null : "write index must never be downsampled, or replaced";
-
-                   IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState(
-                       sourceIndexMeta,
-                       downsampleIndexMeta,
-                       generationLifecycleDate.millis()
-                   );
-                   newMetaData.put(updatedDownsampleMetadata, true);
-                   // add downsample index to data stream
-                   newMetaData.put(dataStream.addBackingIndex(state.metadata(), downsampleIndexMeta.getIndex()));
-                   return ClusterState.builder(state).metadata(newMetaData).build();
+                   return addDownsampleIndexToDataStream(state, dataStream, sourceIndexMeta, downsampleIndexMeta);
                }
            }
        }
@@ -151,6 +136,34 @@ ClusterState execute(ClusterState state) {
        return state;
    }

+   /**
+    * This creates a new {@link ClusterState} with an updated data stream that contains the provided downsample index.
+    * This method is private as it fits into the flow of this cluster state task - i.e. the source index has already been removed from
+    * the provided state.
+    */
+   private static ClusterState addDownsampleIndexToDataStream(
+       ClusterState state,
+       DataStream dataStream,
+       IndexMetadata sourceIndexMeta,
+       IndexMetadata downsampleIndexMeta
+   ) {
+       Metadata.Builder newMetaData = Metadata.builder(state.getMetadata());
+       TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta);
+       // the generation lifecycle date is null only for the write index
+       // we already fail when attempting to delete/downsample the write index, so the following assertion just reinforces that
+       assert generationLifecycleDate != null : "write index must never be downsampled, or replaced";
+       IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState(
+           sourceIndexMeta,
+           downsampleIndexMeta,
+           generationLifecycleDate.millis()
+       );
+
+       newMetaData.put(updatedDownsampleMetadata, true);
+       // we deleted the source already so let's add the downsample index to the data stream
+       newMetaData.put(dataStream.addBackingIndex(state.metadata(), downsampleIndexMeta.getIndex()));
+       return ClusterState.builder(state).metadata(newMetaData).build();
+   }
+
   /**
    * Copies the data stream lifecycle state information from the source index to the destination.
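Condensed, the rewritten execute() above now does three things on its happy path: refuse to touch a write index, delete the source index within the same cluster state update, and re-add the downsample index through the helper introduced here. A sketch of that path, assuming the task's fields (settings, dataStreamName, sourceBackingIndex, downsampleIndex) are in scope, and eliding the branches where the source index is already gone or detached:

    // Happy-path sketch of DeleteSourceAndAddDownsampleToDS#execute, not the verbatim method.
    ClusterState execute(ClusterState state) {
        IndexAbstraction source = state.metadata().getIndicesLookup().get(sourceBackingIndex);
        DataStream parent = source.getParentDataStream();
        if (parent != null && parent.getWriteIndex().getName().equals(sourceBackingIndex)) {
            throw new IllegalStateException(
                String.format(Locale.ROOT, "index [%s] is the write index for data stream [%s] and cannot be replaced",
                    sourceBackingIndex, parent.getName())
            );
        }
        IndexMetadata sourceIndexMeta = state.metadata().index(sourceBackingIndex);
        IndexMetadata downsampleIndexMeta = state.metadata().index(downsampleIndex);
        // delete the source first; the returned state no longer contains it
        state = MetadataDeleteIndexService.deleteIndices(state, Set.of(sourceIndexMeta.getIndex()), settings);
        // re-fetch the data stream from the post-delete state, then attach the downsample index
        DataStream dataStream = state.metadata().dataStreams().get(dataStreamName);
        return addDownsampleIndexToDataStream(state, dataStream, sourceIndexMeta, downsampleIndexMeta);
    }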
* This ensures the destination index will have a generation time by setting the {@link IndexSettings#LIFECYCLE_ORIGINATION_DATE} and @@ -215,7 +228,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - ReplaceSourceWithDownsampleIndexTask that = (ReplaceSourceWithDownsampleIndexTask) o; + DeleteSourceAndAddDownsampleToDS that = (DeleteSourceAndAddDownsampleToDS) o; return Objects.equals(dataStreamName, that.dataStreamName) && Objects.equals(sourceBackingIndex, that.sourceBackingIndex) && Objects.equals(downsampleIndex, that.downsampleIndex); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java deleted file mode 100644 index 625c1f71a92db..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.datastreams.lifecycle.downsampling; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.SimpleBatchedExecutor; -import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.snapshots.SnapshotInProgressException; - -/** - * Cluster service task (batched) executor that executes the replacement of data stream backing index with its - * downsampled index. 
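The executor being deleted below had to pair the cluster state swap with a best-effort client-side delete of the source index, notifying the task listener before issuing the delete call. Its successor needs no Client at all: the delete happens inside the task's own execute(), so the executor only notifies listeners and reroutes once per batch. A minimal sketch of that successor's shape, inferred from DeleteSourceAndAddDownsampleIndexExecutorTests further down in this patch; the override bodies and the afterBatchExecution return type are assumptions, not quoted code:

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.SimpleBatchedExecutor;
    import org.elasticsearch.cluster.routing.allocation.AllocationService;
    import org.elasticsearch.core.Tuple;

    import static org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionListener.rerouteCompletionIsNotRequired;

    // Sketch only: DeleteSourceAndAddDownsampleToDS is the task renamed in this patch,
    // assumed to live in the same package.
    class DeleteSourceAndAddDownsampleIndexExecutorSketch extends SimpleBatchedExecutor<DeleteSourceAndAddDownsampleToDS, Void> {
        private final AllocationService allocationService;

        DeleteSourceAndAddDownsampleIndexExecutorSketch(AllocationService allocationService) {
            this.allocationService = allocationService;
        }

        @Override
        public Tuple<ClusterState, Void> executeTask(DeleteSourceAndAddDownsampleToDS task, ClusterState clusterState) throws Exception {
            // the task deletes the source index and adds the downsample index itself
            return Tuple.tuple(task.execute(clusterState), null);
        }

        @Override
        public void taskSucceeded(DeleteSourceAndAddDownsampleToDS task, Void unused) {
            task.getListener().onResponse(null);
        }

        @Override
        public ClusterState afterBatchExecution(ClusterState clusterState, boolean clusterStateChanged) {
            // indices may have been deleted while the batch executed, so trigger allocation,
            // matching the reroute(state, "deleted indices", ...) call the new test verifies
            return clusterStateChanged
                ? allocationService.reroute(clusterState, "deleted indices", rerouteCompletionIsNotRequired())
                : clusterState;
        }
    }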
- * After the task is executed the executor issues a delete API call for the source index however, it doesn't - * hold up the task listener (nb we notify the listener before we call the delete API so we don't introduce - * weird partial failure scenarios - if the delete API fails the - * {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService} will retry on the next run so the source index will get - * deleted) - */ -public class ReplaceBackingWithDownsampleIndexExecutor extends SimpleBatchedExecutor { - private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); - private final Client client; - - public ReplaceBackingWithDownsampleIndexExecutor(Client client) { - this.client = client; - } - - @Override - public Tuple executeTask(ReplaceSourceWithDownsampleIndexTask task, ClusterState clusterState) throws Exception { - return Tuple.tuple(task.execute(clusterState), null); - } - - @Override - public void taskSucceeded(ReplaceSourceWithDownsampleIndexTask task, Void unused) { - LOGGER.trace( - "Updated cluster state and replaced index [{}] with index [{}] in data stream [{}]", - task.getSourceBackingIndex(), - task.getDownsampleIndex(), - task.getDataStreamName() - ); - task.getListener().onResponse(null); - - LOGGER.trace( - "Issuing request to delete index [{}] as it's not part of data stream [{}] anymore", - task.getSourceBackingIndex(), - task.getDataStreamName() - ); - // chain an optimistic delete of the source index call here (if it fails it'll be retried by the data stream lifecycle loop) - client.admin().indices().delete(new DeleteIndexRequest(task.getSourceBackingIndex()), new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - if (acknowledgedResponse.isAcknowledged()) { - LOGGER.info( - "Data stream lifecycle successfully deleted index [{}] due to being replaced by the downsampled index [{}] in" - + " data stream [{}]", - task.getSourceBackingIndex(), - task.getDownsampleIndex(), - task.getDataStreamName() - ); - } else { - LOGGER.trace( - "The delete request for index [{}] was not acknowledged. Data stream lifecycle service will retry on the" - + " next run if the index still exists", - task.getSourceBackingIndex() - ); - } - } - - @Override - public void onFailure(Exception e) { - if (e instanceof IndexNotFoundException) { - // index was already deleted, treat this as a success - LOGGER.trace("Did not delete index [{}] as it was already deleted", task.getSourceBackingIndex()); - return; - } - - if (e instanceof SnapshotInProgressException) { - LOGGER.info( - "Data stream lifecycle is unable to delete index [{}] because it's part of an ongoing snapshot. Retrying on " - + "the next data stream lifecycle run", - task.getSourceBackingIndex() - ); - } else { - LOGGER.error( - () -> Strings.format( - "Data stream lifecycle encountered an error trying to delete index [%s]. 
It will retry on its next run.", - task.getSourceBackingIndex() - ), - e - ); - } - } - }); - } -} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index a9bb94658b890..e55ff022693b3 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -270,7 +270,6 @@ public void setup() throws Exception { MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, indicesService, - null, xContentRegistry() ); rolloverService = new MetadataRolloverService( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java index 59aa4c17387c8..29c88b7f75463 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -31,15 +29,11 @@ import java.util.Collections; import java.util.List; -import java.util.Set; import java.util.function.Consumer; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DeleteDataStreamTransportActionTests extends ESTestCase { @@ -53,7 +47,7 @@ public void testDeleteDataStream() { ClusterState cs = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 2)), otherIndices); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(0)); assertThat(newState.metadata().indices().size(), equalTo(otherIndices.size())); for (String indexName : otherIndices) { @@ -74,7 +68,7 @@ public void testDeleteMultipleDataStreams() { ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "ba*", "eggplant" }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); DataStream 
remainingDataStream = newState.metadata().dataStreams().get(dataStreamNames[0]); assertNotNull(remainingDataStream); @@ -100,7 +94,7 @@ public void testDeleteSnapshottingDataStream() { DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); SnapshotInProgressException e = expectThrows( SnapshotInProgressException.class, - () -> DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, snapshotCs, req, validator) + () -> DeleteDataStreamTransportAction.removeDataStream(iner, snapshotCs, req, validator, Settings.EMPTY) ); assertThat( @@ -146,16 +140,16 @@ public void testDeleteNonexistentDataStream() { expectThrows( ResourceNotFoundException.class, () -> DeleteDataStreamTransportAction.removeDataStream( - getMetadataDeleteIndexService(), iner, cs, new DeleteDataStreamAction.Request(new String[] { dataStreamName }), - validator + validator, + Settings.EMPTY ) ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName + "*" }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState, sameInstance(cs)); assertThat(newState.metadata().dataStreams().size(), equalTo(cs.metadata().dataStreams().size())); assertThat( @@ -164,22 +158,4 @@ public void testDeleteNonexistentDataStream() { ); } - @SuppressWarnings("unchecked") - private static MetadataDeleteIndexService getMetadataDeleteIndexService() { - MetadataDeleteIndexService s = mock(MetadataDeleteIndexService.class); - when(s.deleteIndices(any(ClusterState.class), any(Set.class))).thenAnswer(mockInvocation -> { - ClusterState currentState = (ClusterState) mockInvocation.getArguments()[0]; - Set indices = (Set) mockInvocation.getArguments()[1]; - - final Metadata.Builder b = Metadata.builder(currentState.metadata()); - for (Index index : indices) { - b.remove(index.getName()); - } - - return ClusterState.builder(currentState).metadata(b.build()).build(); - }); - - return s; - } - } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 5b73d94be578a..712ad07bc0634 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; @@ -44,6 +46,11 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import 
org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; @@ -57,9 +64,11 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.snapshots.EmptySnapshotsInfoService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -70,6 +79,7 @@ import java.time.Clock; import java.time.Instant; import java.time.ZoneId; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -96,7 +106,6 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -129,6 +138,18 @@ public void setupServices() { clientSeenRequests = new CopyOnWriteArrayList<>(); client = getTransportRequestsRecordingClient(); + AllocationService allocationService = new AllocationService( + new AllocationDeciders( + new HashSet<>( + Arrays.asList(new SameShardAllocationDecider(clusterSettings), new ReplicaAfterPrimaryActiveAllocationDecider()) + ) + ), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE, + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY + ); dataStreamLifecycleService = new DataStreamLifecycleService( Settings.EMPTY, client, @@ -136,7 +157,8 @@ public void setupServices() { clock, threadPool, () -> now, - new DataStreamLifecycleErrorStore() + new DataStreamLifecycleErrorStore(), + allocationService ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1027,9 +1049,8 @@ public void testDownsampling() throws Exception { } // on this run, as downsampling is complete we expect to trigger the {@link - // org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask} - // cluster service task and replace the source index with the downsample index in the data stream - // we also expect a delete request for the source index to be witnessed + // org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleToDS} + // cluster service task and delete the source index whilst adding the downsample index in the data stream affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling(clusterService.state(), dataStream, List.of(firstGenIndex)); assertThat(affectedIndices, is(Set.of(firstGenIndex))); assertBusy(() -> { @@ -1038,51 +1059,13 @@ public void testDownsampling() throws Exception { // the downsample index must be part of the data stream assertThat(downsample.getParentDataStream(), 
is(notNullValue())); assertThat(downsample.getParentDataStream().getName(), is(dataStreamName)); - // the source index must not be part of the data stream + // the source index was deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndexName); - assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); - // {@link ReplaceBackingWithDownsampleIndexExecutor} triggers a delete reuqest for the backing index when the cluster state - // is successfully updated - assertThat(clientSeenRequests.size(), is(3)); - assertThat(clientSeenRequests.get(2), instanceOf(DeleteIndexRequest.class)); + // no further requests should be triggered + assertThat(clientSeenRequests.size(), is(2)); }, 30, TimeUnit.SECONDS); - - // NOTE from now on we need to refresh the state and dataStream variables as the data stream lifecycle service updated the - // cluster state in the cluster service via {@link ReplaceBackingWithDownsampleIndexExecutor} - dataStream = clusterService.state().metadata().dataStreams().get(dataStreamName); - state = clusterService.state(); - - // before we remove the backing index (to "implement" the above issued delete request) let's issue another data stream service - // donwsampling run as the service should detect that the index has not been deleted and issue a request itself - - // note that we call the downsampling with the downsampled index from now on, as IT is the one that's part of the datastream now - IndexMetadata downsampleMeta = clusterService.state().metadata().index(downsampleIndexName); - affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling( - clusterService.state(), - dataStream, - List.of(downsampleMeta.getIndex()) - ); - assertThat(affectedIndices, is(Set.of(downsampleMeta.getIndex()))); - assertThat(clientSeenRequests.size(), is(4)); - assertThat(clientSeenRequests.get(3), instanceOf(DeleteIndexRequest.class)); - - { - // let's remove the backing index (as delete was successful ... say) - Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()); - metadataBuilder.remove(firstGenIndexName); - state = ClusterState.builder(state).metadata(metadataBuilder).build(); - setState(clusterService, state); - } - - // downsample was successful for this index, nothing else to have been executed here (still 4 witnessed reuqests as before) - affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling( - clusterService.state(), - dataStream, - List.of(downsampleMeta.getIndex()) - ); - assertThat(affectedIndices, is(empty())); - assertThat(clientSeenRequests.size(), is(4)); } public void testDownsamplingWhenTargetIndexNameClashYieldsException() throws Exception { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java new file mode 100644 index 0000000000000..e21fda0fe579b --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.downsampling; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionListener.rerouteCompletionIsNotRequired; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class DeleteSourceAndAddDownsampleIndexExecutorTests extends ESTestCase { + + public void testExecutorNotifiesListenerAndReroutesAllocationService() { + String dataStreamName = randomAlphaOfLengthBetween(10, 100); + String sourceIndex = randomAlphaOfLengthBetween(10, 100); + String downsampleIndex = randomAlphaOfLengthBetween(10, 100); + + AllocationService allocationService = mock(AllocationService.class); + DeleteSourceAndAddDownsampleIndexExecutor executor = new DeleteSourceAndAddDownsampleIndexExecutor(allocationService); + + AtomicBoolean taskListenerCalled = new AtomicBoolean(false); + executor.taskSucceeded( + new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, sourceIndex, downsampleIndex, new ActionListener<>() { + @Override + public void onResponse(Void unused) { + taskListenerCalled.set(true); + } + + @Override + public void onFailure(Exception e) { + logger.error(e.getMessage(), e); + fail("unexpected exception: " + e.getMessage()); + } + }), + null + ); + assertThat(taskListenerCalled.get(), is(true)); + + ClusterState state = ClusterState.EMPTY_STATE; + executor.afterBatchExecution(state, true); + verify(allocationService).reroute(state, "deleted indices", rerouteCompletionIsNotRequired()); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java similarity index 78% rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java rename to modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java index c3d1262c72dce..062d502ee7029 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.datastreams.lifecycle.downsampling; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -17,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class ReplaceSourceWithDownsampleIndexTaskTests extends ESTestCase { +public class DeleteSourceAndAddDownsampleToDSTests extends ESTestCase { private long now; @@ -58,7 +58,8 @@ public void testDownsampleIndexMissingIsNoOp() { ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); String firstGeneration = DataStream.getDefaultBackingIndexName(dataStreamName, 1); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask( + ClusterState newState = new DeleteSourceAndAddDownsampleToDS( + Settings.EMPTY, dataStreamName, firstGeneration, "downsample-1s-" + firstGeneration, @@ -94,9 +95,8 @@ public void testDownsampleIsAddedToDSEvenIfSourceDeleted() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -129,7 +129,9 @@ public void testSourceIndexIsWriteIndexThrowsException() { IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new ReplaceSourceWithDownsampleIndexTask(dataStreamName, writeIndex, downsampleIndex, null).execute(previousState) + () -> new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, writeIndex, downsampleIndex, null).execute( + previousState + ) ); assertThat( @@ -138,7 +140,7 @@ public void testSourceIndexIsWriteIndexThrowsException() { ); } - public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { + public void testSourceIsDeleteAndDownsampleOriginationDateIsConfigured() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; Metadata.Builder builder = Metadata.builder(); @@ -162,13 +164,14 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { // let's add some lifecycle custom metadata to the first generation index IndexMetadata indexMetadata = previousState.metadata().index(firstGenIndex); + RolloverInfo rolloverInfo = indexMetadata.getRolloverInfos().get(dataStreamName); + IndexMetadata.Builder firstGenBuilder = IndexMetadata.builder(indexMetadata) .putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, Map.of(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY, String.valueOf(now))); Metadata.Builder metaBuilder = Metadata.builder(previousState.metadata()).put(firstGenBuilder); previousState = ClusterState.builder(previousState).metadata(metaBuilder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = 
newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -176,16 +179,11 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // the source index is NOT part of the data stream + // the source index is deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); - assertThat(sourceIndexAbstraction, is(notNullValue())); - assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); // let's check the downsample index has the origination date configured to the source index rollover time - IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); - RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); - assertThat(rolloverInfo, is(notNullValue())); - IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(rolloverInfo.getTime())); assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), notNullValue()); @@ -220,9 +218,8 @@ public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfi builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -230,16 +227,15 @@ public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfi // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // the source index is NOT part of the data stream + // the source index was deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); - assertThat(sourceIndexAbstraction, is(notNullValue())); - assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(downsampleOriginationDate)); } - public void testSourceIndexIsNotPartOfDSAnymore() { + public void testSourceIndexIsDeleteEvenIfNotPartOfDSAnymore() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; Metadata.Builder builder = Metadata.builder(); @@ -263,9 +259,8 @@ public void testSourceIndexIsNotPartOfDSAnymore() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new 
DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -273,43 +268,6 @@ public void testSourceIndexIsNotPartOfDSAnymore() { // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // origination date and the lifecycle metadata is configured even if the source index is not part of the data stream anymore - IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); - RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); - assertThat(rolloverInfo, is(notNullValue())); - } - - public void testListenersIsNonConsideredInEquals() { - // the task is used as a key in a result deduplicator ({@link ResultDeduplicator}) map and the listener must not - // be taken into account - - String dataStreamName = randomAlphaOfLengthBetween(10, 100); - String sourceBackingIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - ReplaceSourceWithDownsampleIndexTask withoutListener = new ReplaceSourceWithDownsampleIndexTask( - dataStreamName, - sourceBackingIndex, - downsampleIndex, - null - ); - - ReplaceSourceWithDownsampleIndexTask withListener = new ReplaceSourceWithDownsampleIndexTask( - dataStreamName, - sourceBackingIndex, - downsampleIndex, - new ActionListener<>() { - @Override - public void onResponse(Void unused) { - - } - - @Override - public void onFailure(Exception e) { - - } - } - ); - - assertThat(withoutListener.equals(withListener), is(true)); + assertThat(newState.metadata().getIndicesLookup().get(firstGenIndex), is(nullValue())); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java deleted file mode 100644 index ba501a17bbcf4..0000000000000 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle.downsampling; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.client.NoOpClient; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.is; - -public class ReplaceBackingWithDownsampleIndexExecutorTests extends ESTestCase { - - public void testExecutorDeletesTheSourceIndexWhenTaskSucceeds() { - String dataStreamName = randomAlphaOfLengthBetween(10, 100); - String sourceIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - - try (Client client = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - assertThat(action.name(), is(DeleteIndexAction.NAME)); - assertTrue(request instanceof DeleteIndexRequest); - DeleteIndexRequest deleteIndexRequest = (DeleteIndexRequest) request; - assertThat(deleteIndexRequest.indices().length, is(1)); - assertThat(deleteIndexRequest.indices()[0], is(sourceIndex)); - } - }) { - ReplaceBackingWithDownsampleIndexExecutor executor = new ReplaceBackingWithDownsampleIndexExecutor(client); - - AtomicBoolean taskListenerCalled = new AtomicBoolean(false); - executor.taskSucceeded( - new ReplaceSourceWithDownsampleIndexTask(dataStreamName, sourceIndex, downsampleIndex, new ActionListener() { - @Override - public void onResponse(Void unused) { - taskListenerCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - logger.error(e.getMessage(), e); - fail("unexpected exception: " + e.getMessage()); - } - }), - null - ); - assertThat(taskListenerCalled.get(), is(true)); - } - } - - public void testExecutorCallsTaskListenerEvenIfDeteleFails() { - String dataStreamName = randomAlphaOfLengthBetween(10, 100); - String sourceIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - - try (Client client = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - listener.onFailure(new IllegalStateException("simulating a failure to delete index " + sourceIndex)); - } - }) { - ReplaceBackingWithDownsampleIndexExecutor executor = new ReplaceBackingWithDownsampleIndexExecutor(client); - - AtomicBoolean taskListenerCalled = new AtomicBoolean(false); - executor.taskSucceeded( - new ReplaceSourceWithDownsampleIndexTask(dataStreamName, sourceIndex, downsampleIndex, new ActionListener() { - @Override - public void onResponse(Void unused) { - taskListenerCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - logger.error(e.getMessage(), e); - fail("unexpected exception: " + e.getMessage()); - } - }), - null - ); - assertThat(taskListenerCalled.get(), is(true)); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 4444ab72ca15e..ee94008372dab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -416,10 +416,6 @@ public AllocationService getAllocationService() { return allocationService; } - public MetadataDeleteIndexService getMetadataDeleteIndexService() { - return metadataDeleteIndexService; - } - @Override protected void configure() { bind(GatewayAllocator.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 3fb6eafb5c606..8423a5ad37334 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -155,15 +155,6 @@ static ClusterState updateDataLifecycle( Metadata.Builder builder = Metadata.builder(metadata); for (var dataStreamName : dataStreamNames) { var dataStream = validateDataStream(metadata, dataStreamName); - if (dataStream.isSystem()) { - if (lifecycle != null && lifecycle.getDownsamplingRounds() != null) { - throw new IllegalArgumentException( - "System data streams do not support downsampling as part of their lifecycle configuration. Encountered [" - + dataStream.getName() - + "] in the request" - ); - } - } builder.put( new DataStream( dataStream.getName(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java index 9a932200f78e1..516be12d56a6b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java @@ -62,7 +62,7 @@ public Tuple executeTask( DeleteIndexClusterStateUpdateRequest task, ClusterState clusterState ) { - return Tuple.tuple(deleteIndices(clusterState, Sets.newHashSet(task.indices())), task); + return Tuple.tuple(MetadataDeleteIndexService.deleteIndices(clusterState, Sets.newHashSet(task.indices()), settings), task); } @Override @@ -90,7 +90,7 @@ public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request) { /** * Delete some indices from the cluster state. 
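One enabling change for the task above sits in this hunk (continued just below): deleteIndices becomes a static function of the current cluster state plus node Settings, so callers such as DeleteSourceAndAddDownsampleToDS and RestoreService no longer need an injected MetadataDeleteIndexService. A minimal caller sketch; the wrapper class is hypothetical, while the deleteIndices call itself matches this diff:

    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.Index;

    import java.util.Set;

    // Hypothetical caller: computes a new cluster state with the given indices removed.
    final class DeleteIndicesSketch {
        static ClusterState withoutIndices(ClusterState state, Set<Index> indices, Settings settings) {
            // before this patch: deleteIndexService.deleteIndices(state, indices) on an injected instance;
            // after: a static call, with the node Settings passed explicitly
            return MetadataDeleteIndexService.deleteIndices(state, indices, settings);
        }
    }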
*/ - public ClusterState deleteIndices(ClusterState currentState, Set indices) { + public static ClusterState deleteIndices(ClusterState currentState, Set indices, Settings settings) { final Metadata meta = currentState.metadata(); final Set indicesToDelete = new HashSet<>(); final Map backingIndices = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java index 9a1a05bc4ca8c..fb5acbdd2ac49 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java @@ -54,22 +54,20 @@ public class MetadataIndexAliasesService { private final IndicesService indicesService; - private final MetadataDeleteIndexService deleteIndexService; - private final NamedXContentRegistry xContentRegistry; private final ClusterStateTaskExecutor executor; private final MasterServiceTaskQueue taskQueue; + private final ClusterService clusterService; @Inject public MetadataIndexAliasesService( ClusterService clusterService, IndicesService indicesService, - MetadataDeleteIndexService deleteIndexService, NamedXContentRegistry xContentRegistry ) { + this.clusterService = clusterService; this.indicesService = indicesService; - this.deleteIndexService = deleteIndexService; this.xContentRegistry = xContentRegistry; this.executor = new SimpleBatchedAckListenerTaskExecutor<>() { @@ -110,7 +108,7 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable componentTemplates - ) { - DataStreamLifecycle resolvedLifecycle = MetadataIndexTemplateService.resolveLifecycle(composableIndexTemplate, componentTemplates); - if (resolvedLifecycle != null && resolvedLifecycle.isEnabled() && resolvedLifecycle.getDownsamplingRounds() != null) { - throw new IllegalArgumentException("System data streams do not support downsampling as part of their lifecycle configuration"); - } - } - public String getDataStreamName() { return dataStreamName; } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 1ae3aaa9e09db..9ccb26dcbe79a 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -916,7 +916,6 @@ protected Node( repositoryService, clusterModule.getAllocationService(), metadataCreateIndexService, - clusterModule.getMetadataDeleteIndexService(), indexMetadataVerifier, shardLimitValidator, systemIndices, diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 589163ab00581..025a1840c04d9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -175,8 +175,6 @@ public class RestoreService implements ClusterStateApplier { private final IndexMetadataVerifier indexMetadataVerifier; - private final MetadataDeleteIndexService metadataDeleteIndexService; - private final ShardLimitValidator shardLimitValidator; private final ClusterSettings clusterSettings; @@ -196,7 +194,6 @@ public RestoreService( RepositoriesService repositoriesService, AllocationService allocationService, MetadataCreateIndexService createIndexService, - MetadataDeleteIndexService metadataDeleteIndexService, IndexMetadataVerifier 
indexMetadataVerifier, ShardLimitValidator shardLimitValidator, SystemIndices systemIndices, @@ -209,7 +206,6 @@ public RestoreService( this.allocationService = allocationService; this.createIndexService = createIndexService; this.indexMetadataVerifier = indexMetadataVerifier; - this.metadataDeleteIndexService = metadataDeleteIndexService; if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { clusterService.addStateApplier(this); } @@ -481,6 +477,7 @@ private void startRestore( metadataBuilder.dataStreams(dataStreamsToRestore, dataStreamAliasesToRestore).build(), dataStreamsToRestore.values(), updater, + clusterService.getSettings(), listener ) ); @@ -1208,6 +1205,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { private final BiConsumer updater; private final AllocationActionListener listener; + private final Settings settings; @Nullable private RestoreInfo restoreInfo; @@ -1221,6 +1219,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { Metadata metadata, Collection dataStreamsToRestore, BiConsumer updater, + Settings settings, ActionListener listener ) { super(request.masterNodeTimeout()); @@ -1232,6 +1231,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { this.metadata = metadata; this.dataStreamsToRestore = dataStreamsToRestore; this.updater = updater; + this.settings = settings; this.listener = new AllocationActionListener<>(listener, threadPool.getThreadContext()); } @@ -1241,9 +1241,10 @@ public ClusterState execute(ClusterState currentState) { ensureSnapshotNotDeleted(currentState); // Clear out all existing indices which fall within a system index pattern being restored - currentState = metadataDeleteIndexService.deleteIndices( + currentState = MetadataDeleteIndexService.deleteIndices( currentState, - resolveSystemIndicesToDelete(currentState, featureStatesToRestore) + resolveSystemIndicesToDelete(currentState, featureStatesToRestore), + settings ); // List of searchable snapshots indices to restore diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java index 8a3de96f02f91..04587018fc9ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java @@ -73,7 +73,10 @@ public void setUp() throws Exception { public void testDeleteMissing() { Index index = new Index("missing", "doesn't matter"); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build(); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.deleteIndices(state, Set.of(index))); + IndexNotFoundException e = expectThrows( + IndexNotFoundException.class, + () -> MetadataDeleteIndexService.deleteIndices(state, Set.of(index), Settings.EMPTY) + ); assertEquals(index, e.getIndex()); } @@ -100,7 +103,11 @@ public void testDeleteSnapshotting() { ClusterState state = ClusterState.builder(clusterState(index)).putCustom(SnapshotsInProgress.TYPE, snaps).build(); Exception e = expectThrows( SnapshotInProgressException.class, - () -> service.deleteIndices(state, Set.of(state.metadata().getIndices().get(index).getIndex())) + () -> MetadataDeleteIndexService.deleteIndices( + state, + Set.of(state.metadata().getIndices().get(index).getIndex()), + Settings.EMPTY + ) ); assertEquals( "Cannot delete indices 
that are being snapshotted: [[" @@ -155,7 +162,11 @@ public void testDeleteIndexWithAnAlias() { .blocks(ClusterBlocks.builder().addBlocks(idxMetadata)) .build(); - ClusterState after = service.deleteIndices(before, Set.of(before.metadata().getIndices().get(index).getIndex())); + ClusterState after = MetadataDeleteIndexService.deleteIndices( + before, + Set.of(before.metadata().getIndices().get(index).getIndex()), + Settings.EMPTY + ); assertNull(after.metadata().getIndices().get(index)); assertNull(after.routingTable().index(index)); @@ -175,7 +186,7 @@ public void testDeleteBackingIndexForDataStream() { int numIndexToDelete = randomIntBetween(1, numBackingIndices - 1); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, numIndexToDelete)).getIndex(); - ClusterState after = service.deleteIndices(before, Set.of(indexToDelete)); + ClusterState after = MetadataDeleteIndexService.deleteIndices(before, Set.of(indexToDelete), Settings.EMPTY); assertThat(after.metadata().getIndices().get(indexToDelete.getName()), nullValue()); assertThat(after.metadata().getIndices().size(), equalTo(numBackingIndices - 1)); @@ -200,7 +211,7 @@ public void testDeleteMultipleBackingIndexForDataStream() { for (int k : indexNumbersToDelete) { indicesToDelete.add(before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, k)).getIndex()); } - ClusterState after = service.deleteIndices(before, indicesToDelete); + ClusterState after = MetadataDeleteIndexService.deleteIndices(before, indicesToDelete, Settings.EMPTY); DataStream dataStream = after.metadata().dataStreams().get(dataStreamName); assertThat(dataStream, notNullValue()); @@ -221,7 +232,10 @@ public void testDeleteCurrentWriteIndexForDataStream() { ); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, numBackingIndices)).getIndex(); - Exception e = expectThrows(IllegalArgumentException.class, () -> service.deleteIndices(before, Set.of(indexToDelete))); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> MetadataDeleteIndexService.deleteIndices(before, Set.of(indexToDelete), Settings.EMPTY) + ); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index 1fbd578cc1bed..0901b1190cfc0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -17,19 +17,25 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.TimeUnit; import 
static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; @@ -42,33 +48,32 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class MetadataIndexAliasesServiceTests extends ESTestCase { - private final MetadataDeleteIndexService deleteIndexService = mock(MetadataDeleteIndexService.class); - private final MetadataIndexAliasesService service = new MetadataIndexAliasesService( - mock(ClusterService.class), - null, - deleteIndexService, - xContentRegistry() - ); - - public MetadataIndexAliasesServiceTests() { - // Mock any deletes so we don't need to worry about how MetadataDeleteIndexService does its job - when(deleteIndexService.deleteIndices(any(ClusterState.class), anySet())).then(i -> { - ClusterState state = (ClusterState) i.getArguments()[0]; - @SuppressWarnings("unchecked") - Collection indices = (Collection) i.getArguments()[1]; - Metadata.Builder meta = Metadata.builder(state.metadata()); - for (Index index : indices) { - assertTrue("index now found", state.metadata().hasIndexAbstraction(index.getName())); - meta.remove(index.getName()); // We only think about metadata for this test. Not routing or any other fun stuff. - } - return ClusterState.builder(state).metadata(meta).build(); - }); + private static TestThreadPool threadPool; + private ClusterService clusterService; + private MetadataIndexAliasesService service; + + @BeforeClass + public static void setupThreadPool() { + threadPool = new TestThreadPool(getTestClass().getName()); + } + + @Before + public void setupServices() { + clusterService = ClusterServiceUtils.createClusterService(threadPool); + service = new MetadataIndexAliasesService(clusterService, null, xContentRegistry()); + } + + @After + public void closeClusterService() throws Exception { + clusterService.close(); + } + + @AfterClass + public static void tearDownThreadPool() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; } public void testAddAndRemove() { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 67a34bf4a08e9..336f813a47a59 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -1961,7 +1961,6 @@ protected void assertSnapshotOrGenericThread() { repositoriesService, allocationService, metadataCreateIndexService, - new MetadataDeleteIndexService(settings, clusterService, allocationService), new IndexMetadataVerifier( settings, clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 0466e565ad95a..8c60045b13ede 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -523,7 +523,7 @@ public static MetadataRolloverService getMetadataRolloverService( false, new IndexSettingProviders(providers) ); - MetadataIndexAliasesService 
indexAliasesService = new MetadataIndexAliasesService(clusterService, indicesService, null, registry); + MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService(clusterService, indicesService, registry); return new MetadataRolloverService( testThreadPool, createIndexService, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java index 2739f64986439..652d6815eea46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java @@ -165,8 +165,9 @@ public class InternalUsers { ForceMergeAction.NAME + "*", // indices stats is used by rollover, so we need to grant it here IndicesStatsAction.NAME + "*", - UpdateSettingsAction.NAME - // Down-sampling related actions are not granted here because down-sampling is not supported for system data streams + UpdateSettingsAction.NAME, + DownsampleAction.NAME, + AddIndexBlockAction.NAME ) .allowRestrictedIndices(true) .build() }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java index 36562474d036b..6603353e967ea 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java @@ -254,7 +254,9 @@ public void testDataStreamLifecycleUser() { DeleteIndexAction.NAME, ForceMergeAction.NAME, IndicesStatsAction.NAME, - UpdateSettingsAction.NAME + UpdateSettingsAction.NAME, + DownsampleAction.NAME, + AddIndexBlockAction.NAME ); final String dataStream = randomAlphaOfLengthBetween(3, 12); checkIndexAccess(role, randomFrom(sampleIndexActions), dataStream, true); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 49ac36b854298..8311d0f613175 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -95,11 +94,11 @@ protected Collection> nodePlugins() { return List.of( LocalStateSecurity.class, DataStreamsPlugin.class, - SystemDataStreamTestPlugin.class, MapperExtrasPlugin.class, Wildcard.class, Downsample.class, - AggregateMetricMapperPlugin.class + AggregateMetricMapperPlugin.class, + SystemDataStreamWithDownsamplingConfigurationPlugin.class ); } @@ -135,70 +134,27 @@ public void testDownsamplingAuthorized() throws Exception { 
waitAndAssertDownsamplingCompleted(dataStreamName); } - public void testConfiguringLifecycleWithDownsamplingForSystemDataStreamFails() { - String dataStreamName = SystemDataStreamTestPlugin.SYSTEM_DATA_STREAM_NAME; - indexDocuments(client(), dataStreamName, 100); - DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() - .downsampling( - new DataStreamLifecycle.Downsampling( - List.of( - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueMillis(0), - new DownsampleConfig(new DateHistogramInterval("5m")) - ), - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueSeconds(10), - new DownsampleConfig(new DateHistogramInterval("10m")) - ) - ) - ) - ) - .build(); - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> client().execute( - PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, lifecycle) - ).actionGet() - ); - assertThat( - illegalArgumentException.getMessage(), - is( - "System data streams do not support downsampling as part of their lifecycle " - + "configuration. Encountered [" - + dataStreamName - + "] in the request" - ) - ); - } - - public void testExplicitSystemDataStreamConfigurationWithDownsamplingFails() { - SystemDataStreamWithDownsamplingConfigurationPlugin pluginWithIllegalSystemDataStream = - new SystemDataStreamWithDownsamplingConfigurationPlugin(); - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> pluginWithIllegalSystemDataStream.getSystemDataStreamDescriptors() - ); - assertThat( - illegalArgumentException.getMessage(), - is("System data streams do not support downsampling as part of their lifecycle configuration") - ); + @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") + public void testSystemDataStreamConfigurationWithDownsampling() throws Exception { + String dataStreamName = SystemDataStreamWithDownsamplingConfigurationPlugin.SYSTEM_DATA_STREAM_NAME; + indexDocuments(client(), dataStreamName, 10_000); + waitAndAssertDownsamplingCompleted(dataStreamName); } private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Exception { List backingIndices = getDataStreamBackingIndices(dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0).getName(); - String oneSecondDownsampleIndex = "downsample-5m-" + firstGenerationBackingIndex; - String tenSecondsDownsampleIndex = "downsample-10m-" + firstGenerationBackingIndex; + String firstRoundDownsamplingIndex = "downsample-5m-" + firstGenerationBackingIndex; + String secondRoundDownsamplingIndex = "downsample-10m-" + firstGenerationBackingIndex; Set witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(oneSecondDownsampleIndex) - || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { - witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); + if (event.indicesCreated().contains(firstRoundDownsamplingIndex) + || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(firstRoundDownsamplingIndex))) { + witnessedDownsamplingIndices.add(firstRoundDownsamplingIndex); } - if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { - witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); + if (event.indicesCreated().contains(secondRoundDownsamplingIndex)) { + 
witnessedDownsamplingIndices.add(secondRoundDownsamplingIndex); } }); @@ -207,15 +163,15 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex assertBusy(() -> { assertNoAuthzErrors(); // first downsampling round - assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(firstRoundDownsamplingIndex), is(true)); }, 30, TimeUnit.SECONDS); assertBusy(() -> { assertNoAuthzErrors(); assertThat(witnessedDownsamplingIndices.size(), is(2)); - assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(firstRoundDownsamplingIndex), is(true)); - assertThat(witnessedDownsamplingIndices.contains(tenSecondsDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(secondRoundDownsamplingIndex), is(true)); }, 30, TimeUnit.SECONDS); assertBusy(() -> { @@ -226,9 +182,9 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex String writeIndex = dsBackingIndices.get(1).getName(); assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); // the last downsampling round must remain in the data stream - assertThat(dsBackingIndices.get(0).getName(), is(tenSecondsDownsampleIndex)); + assertThat(dsBackingIndices.get(0).getName(), is(secondRoundDownsamplingIndex)); assertThat(indexExists(firstGenerationBackingIndex), is(false)); - assertThat(indexExists(oneSecondDownsampleIndex), is(false)); + assertThat(indexExists(firstRoundDownsamplingIndex), is(false)); }, 30, TimeUnit.SECONDS); } @@ -378,55 +334,6 @@ private void bulkIndex(Client client, String dataStreamName, Supplier Indexed [{}] documents. Dropped [{}] duplicates.", docsIndexed, duplicates); } - public static class SystemDataStreamTestPlugin extends Plugin implements SystemIndexPlugin { - - static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; - - @Override - public Collection getSystemDataStreamDescriptors() { - Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); - - try { - return List.of( - new SystemDataStreamDescriptor( - SYSTEM_DATA_STREAM_NAME, - "a system data stream for testing", - SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(SYSTEM_DATA_STREAM_NAME), - new Template(settings.build(), getTSDBMappings(), null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), - Map.of(), - Collections.singletonList("test"), - new ExecutorNames( - ThreadPool.Names.SYSTEM_CRITICAL_READ, - ThreadPool.Names.SYSTEM_READ, - ThreadPool.Names.SYSTEM_WRITE - ) - ) - ); - } catch (IOException e) { - throw new RuntimeException("Unable to create system data stream descriptor", e); - } - } - - @Override - public String getFeatureName() { - return SystemDataStreamTestPlugin.class.getSimpleName(); - } - - @Override - public String getFeatureDescription() { - return "A plugin for testing the data stream lifecycle runtime actions on system data streams"; - } - } - public static class SystemDataStreamWithDownsamplingConfigurationPlugin extends Plugin implements SystemIndexPlugin { static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; @@ -484,7 +391,7 @@ public Collection getSystemDataStreamDescriptors() { @Override public String getFeatureName() { - return 
SystemDataStreamTestPlugin.class.getSimpleName(); + return SystemDataStreamWithDownsamplingConfigurationPlugin.class.getSimpleName(); } @Override From 2998547c1d846ef99e1b7b02854cd42b34220071 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Wed, 20 Sep 2023 10:37:24 +0200 Subject: [PATCH 19/27] Add a version for storing PrimaryTermGeneration in a commit (#99683) --- server/src/main/java/org/elasticsearch/TransportVersions.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 16a99d38f6623..d9c0bc59c06f9 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -141,6 +141,7 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_500_074 = def(8_500_074); public static final TransportVersion NODE_INFO_INDEX_VERSION_ADDED = def(8_500_075); public static final TransportVersion FIRST_NEW_ID_LAYOUT = def(8_501_00_0); + public static final TransportVersion COMMIT_PRIMARY_TERM_GENERATION = def(8_501_00_1); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ From 68ddb19c01a01e7ece03ef6cf52a8ef6c80a6270 Mon Sep 17 00:00:00 2001 From: Alexander Spies Date: Wed, 20 Sep 2023 11:48:40 +0200 Subject: [PATCH 20/27] ESQL: Enable arithmetics for durations and periods (#99432) - Enable date math expressions like (1year + 1month) + now() and (1hour + 1second) + now(). - To achieve this, enable folding of + and - for duration/period literals. - Provide user-friendly error messages for date time arithmetic overflows, and for illegal expressions like eval x = 1week + 1month whose type is not representable. Partially solves #99293. 
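The overflow messages surfaced to the user come straight from the underlying
java.time arithmetic. A minimal plain-JDK sketch of the failure modes the
folding code has to guard against (illustrative only, not part of this change):

    import java.time.Duration;
    import java.time.Period;

    class FoldOverflowSketch {
        public static void main(String[] args) {
            // Period stores years/months/days as ints, so plus() can overflow an int
            try {
                Period.ofYears(Integer.MAX_VALUE).plus(Period.ofYears(1));
            } catch (ArithmeticException e) {
                System.out.println(e.getMessage()); // "integer overflow"
            }
            // Duration stores seconds in a long; the analogous failure is "long overflow"
            try {
                Duration.ofSeconds(Long.MAX_VALUE).plus(Duration.ofSeconds(1));
            } catch (ArithmeticException e) {
                System.out.println(e.getMessage()); // "long overflow"
            }
        }
    }

DateTimeArithmeticOperation#fold() catches exactly these ArithmeticExceptions
and rewraps them as 400-level "arithmetic exception in expression [...]"
errors, which is what the REST tests below assert.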
--- docs/changelog/99432.yaml | 5 + .../xpack/esql/qa/rest/RestEsqlTestCase.java | 42 +++++- .../src/main/resources/date.csv-spec | 105 ++++++++++++++- .../xpack/esql/analysis/Verifier.java | 15 +++ .../predicate/operator/arithmetic/Add.java | 12 ++ .../DateTimeArithmeticOperation.java | 127 +++++++++++++++--- .../arithmetic/EsqlArithmeticOperation.java | 2 +- .../predicate/operator/arithmetic/Sub.java | 15 ++- .../xpack/esql/type/EsqlDataTypeRegistry.java | 8 ++ .../xpack/esql/analysis/VerifierTests.java | 15 +++ .../function/AbstractFunctionTestCase.java | 4 +- .../operator/arithmetic/AddTests.java | 24 ++++ .../operator/arithmetic/SubTests.java | 24 ++++ 13 files changed, 371 insertions(+), 27 deletions(-) create mode 100644 docs/changelog/99432.yaml diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml new file mode 100644 index 0000000000000..df4c5a7f78199 --- /dev/null +++ b/docs/changelog/99432.yaml @@ -0,0 +1,5 @@ +pr: 99432 +summary: "ESQL: Enable arithmetics for durations and periods" +area: ES|QL +type: enhancement +issues: [99293] diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 7d90cf47cae09..82c5f65d210ce 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -336,7 +336,47 @@ public void testErrorMessageForInvalidTypeInParams() throws IOException { ResponseException.class, () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"byte\", \"value\": 5}]").build()) ); - assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("illegal data type [byte]")); + assertThat( + EntityUtils.toString(re.getResponse().getEntity()), + containsString("EVAL does not support type [byte] in expression [?]") + ); + } + + public void testErrorMessageForLiteralDateMathOverflow() throws IOException { + List datePeriodOverflowExpressions = List.of( + "2147483647 day + 1 day", + "306783378 week + 1 week", + "2147483647 year + 1 year" + ); + // We cannot easily force an overflow using just milliseconds, since these are divided by 1000 and then the resulting seconds are + // stored in a long. But combining with seconds works. 
+ List timeDurationOverflowExpressions = List.of( + "9223372036854775807 second + 1000 millisecond", + "9223372036854775807 second + 1 second", + "153722867280912930 minute + 1 minute", + "2562047788015215 hour + 1 hour" + ); + + for (String overflowExp : datePeriodOverflowExpressions) { + assertDateMathOverflow(overflowExp, "integer overflow"); + } + for (String overflowExp : timeDurationOverflowExpressions) { + assertDateMathOverflow(overflowExp, "long overflow"); + } + } + + private static void assertDateMathOverflow(String overflowExpression, String expectedOverflowMessage) throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = now() + (" + overflowExpression + ")").build()) + ); + + String responseMessage = EntityUtils.toString(re.getResponse().getEntity()); + assertThat(responseMessage, containsString("arithmetic exception in expression [" + overflowExpression + "]:")); + // The second part of the error message might come after a newline, so we check for it separately. + assertThat(responseMessage, containsString("[" + expectedOverflowMessage + "]")); + + assertThat(re.getResponse().getStatusLine().getStatusCode(), equalTo(400)); } public void testErrorMessageForArrayValuesInParams() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 41b068595b4cd..5adecec275682 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -465,12 +465,27 @@ dt:datetime |plus:datetime 2100-01-01T01:01:01.000Z |2104-04-16T01:01:01.000Z ; -datePlusDuration -row dt = to_dt("2100-01-01T00:00:00.000Z") -| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; +datePlusPeriodFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = 4 years + 3 months + 2 weeks + 1 day + n | keep then; -dt:datetime |plus:datetime -2100-01-01T00:00:00.000Z |2100-01-01T01:01:01.001Z +then:datetime +1957-07-19T00:00:00.000Z +; + +datePlusMixedPeriodsFromLeft +row n = to_dt("1953-04-01T00:00:00.000Z") +| eval then = 4 years + 3 months + 1 year + 2 weeks + 1 month + 1 day + 1 week + 1 day + n +| keep then; + +then:datetime +1958-08-24T00:00:00.000Z +; + +datePlusSumOfPeriodsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = (4 years + 3 months + 2 weeks + 1 day) + n | keep then; + +then:datetime +1957-07-19T00:00:00.000Z ; dateMinusPeriod @@ -481,6 +496,61 @@ dt:datetime |minus:datetime 2104-04-16T01:01:01.000Z |2100-01-01T01:01:01.000Z ; +dateMinusPeriodFromLeft +row n = to_dt("1957-07-19T00:00:00.000Z") | eval then = -4 years - 3 months - 2 weeks - 1 day + n | keep then; + +then:datetime +1953-04-04T00:00:00.000Z +; + +dateMinusSumOfNegativePeriods +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = n - (-4 years - 3 months - 2 weeks - 1 day)| keep then; + +then:datetime +1957-07-19T00:00:00.000Z +; + +dateMinusPeriodsFromLeftMultipleEvals +row n = to_dt("1953-04-04T00:00:00.000Z") +| eval x = -4 years + n +| eval y = -3 months + x, then = y + (-2 weeks - 1 day) +| keep then; + +then:datetime +1948-12-20T00:00:00.000Z +; + +datePlusDuration +row dt = to_dt("2100-01-01T00:00:00.000Z") +| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:datetime |plus:datetime +2100-01-01T00:00:00.000Z |2100-01-01T01:01:01.001Z +; + +datePlusDurationFromLeft +row n = 
to_dt("1953-04-04T00:00:00.000Z") | eval then = 1 hour + 1 minute + 1 second + 1 milliseconds + n | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + +datePlusMixedDurationsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") +| eval then = 1 hour + 1 minute + 2 hour + 1 second + 2 minute + 1 milliseconds + 2 second + 2 millisecond + n +| keep then; + +then:datetime +1953-04-04T03:03:03.003Z +; + +datePlusSumOfDurationsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = (1 hour + 1 minute + 1 second + 1 milliseconds) + n | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + dateMinusDuration row dt = to_dt("2100-01-01T01:01:01.001Z") | eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds; @@ -489,6 +559,31 @@ dt:datetime |minus:datetime 2100-01-01T01:01:01.001Z |2100-01-01T00:00:00.000Z ; +dateMinusDurationFromLeft +row n = to_dt("1953-04-04T01:01:01.001Z") | eval then = -1 hour - 1 minute - 1 second - 1 milliseconds + n | keep then; + +then:datetime +1953-04-04T00:00:00.000Z +; + +dateMinusSumOfNegativeDurations +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = n - (-1 hour - 1 minute - 1 second - 1 milliseconds) | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + +dateMinusDurationsFromLeftMultipleEvals +row n = to_dt("1953-04-04T04:03:02.001Z") +| eval x = -4 hour + n +| eval y = -3 minute + x, then = y + (-2 second - 1 millisecond) +| keep then +; + +then:datetime +1953-04-04T00:00:00.000Z +; + datePlusPeriodAndDuration row dt = to_dt("2100-01-01T00:00:00.000Z") | eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 4c79b2453f8e3..1f5e6ee3fd6ed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -172,6 +172,8 @@ else if (p.resolved()) { } } else if (p instanceof Row row) { failures.addAll(validateRow(row)); + } else if (p instanceof Eval eval) { + failures.addAll(validateEval(eval)); } p.forEachExpression(BinaryOperator.class, bo -> { @@ -236,6 +238,19 @@ private static Collection validateRow(Row row) { return failures; } + private static Collection validateEval(Eval eval) { + List failures = new ArrayList<>(eval.fields().size()); + eval.fields().forEach(field -> { + DataType dataType = field.dataType(); + if (EsqlDataTypes.isRepresentable(dataType) == false) { + failures.add( + fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText()) + ); + } + }); + return failures; + } + /** * Limit QL's comparisons to types we support. 
*/ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java index 7da2754bfd931..f59211ab42882 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.ql.tree.Source; import java.time.DateTimeException; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.ADD; @@ -88,4 +90,14 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function return asMillis(asDateTime(datetime).plus(temporalAmount)); } + + @Override + public Period fold(Period left, Period right) { + return left.plus(right); + } + + @Override + public Duration fold(Duration left, Duration right) { + return left.plus(right); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 3780e19a1dfd9..19552d4e873cd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -9,21 +9,45 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.esql.EsqlClientException; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; abstract class DateTimeArithmeticOperation extends EsqlArithmeticOperation { + /** + * Custom exception to handle e.g. overflows when folding temporal values; we want to set the correct HTTP status (400). + */ + private static class IllegalTemporalValueException extends EsqlClientException { + protected IllegalTemporalValueException(String message, Object... 
args) { + super(message, args); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + public static IllegalTemporalValueException fromArithmeticException(Source source, ArithmeticException e) { + return new IllegalTemporalValueException("arithmetic exception in expression [{}]: [{}]", source.text(), e.getMessage()); + } + } + /** Arithmetic (quad) function. */ interface DatetimeArithmeticEvaluator { ExpressionEvaluator apply( @@ -55,31 +79,100 @@ ExpressionEvaluator apply( protected TypeResolution resolveType() { DataType leftType = left().dataType(); DataType rightType = right().dataType(); - // date math is only possible if one argument is a DATETIME and the other a (foldable) TemporalValue + + // Date math is only possible if either + // - one argument is a DATETIME and the other a (foldable) TemporalValue, or + // - both arguments are TemporalValues (so we can fold them). if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) { - if (argumentOfType(DataTypes::isDateTime) == null || argumentOfType(EsqlDataTypes::isTemporalAmount) == null) { - return new TypeResolution( - format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), leftType, rightType) - ); + if ((leftType == DataTypes.DATETIME && isTemporalAmount(rightType)) + || (rightType == DataTypes.DATETIME && isTemporalAmount(leftType))) { + return TypeResolution.TYPE_RESOLVED; + } + if (leftType == TIME_DURATION && rightType == TIME_DURATION) { + return TypeResolution.TYPE_RESOLVED; + } + if (leftType == DATE_PERIOD && rightType == DATE_PERIOD) { + return TypeResolution.TYPE_RESOLVED; } - return TypeResolution.TYPE_RESOLVED; + + return new TypeResolution( + format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), leftType, rightType) + ); } return super.resolveType(); } + /** + * Override this to allow processing literals of type {@link EsqlDataTypes#DATE_PERIOD} when folding constants. + * Used in {@link DateTimeArithmeticOperation#fold()}. + * @param left the left period + * @param right the right period + * @return the result of the evaluation + */ + abstract Period fold(Period left, Period right); + + /** + * Override this to allow processing literals of type {@link EsqlDataTypes#TIME_DURATION} when folding constants. + * Used in {@link DateTimeArithmeticOperation#fold()}. + * @param left the left duration + * @param right the right duration + * @return the result of the evaluation + */ + abstract Duration fold(Duration left, Duration right); + + @Override + public final Object fold() { + DataType leftDataType = left().dataType(); + DataType rightDataType = right().dataType(); + if (leftDataType == DATE_PERIOD && rightDataType == DATE_PERIOD) { + // Both left and right expressions are temporal amounts; we can assume they are both foldable. + Period l = (Period) left().fold(); + Period r = (Period) right().fold(); + try { + return fold(l, r); + } catch (ArithmeticException e) { + // Folding will be triggered before the plan is sent to the compute service, so we have to handle arithmetic exceptions + // manually and provide a user-friendly error message. + throw IllegalTemporalValueException.fromArithmeticException(source(), e); + } + } + if (leftDataType == TIME_DURATION && rightDataType == TIME_DURATION) { + // Both left and right expressions are temporal amounts; we can assume they are both foldable. 
+ Duration l = (Duration) left().fold(); + Duration r = (Duration) right().fold(); + try { + return fold(l, r); + } catch (ArithmeticException e) { + // Folding will be triggered before the plan is sent to the compute service, so we have to handle arithmetic exceptions + // manually and provide a user-friendly error message. + throw IllegalTemporalValueException.fromArithmeticException(source(), e); + } + } + return super.fold(); + } + @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - return dataType() == DataTypes.DATETIME - ? dvrCtx -> datetimes.apply( + if (dataType() == DataTypes.DATETIME) { + // One of the arguments has to be a datetime and the other a temporal amount. + Expression datetimeArgument; + Expression temporalAmountArgument; + if (left().dataType() == DataTypes.DATETIME) { + datetimeArgument = left(); + temporalAmountArgument = right(); + } else { + datetimeArgument = right(); + temporalAmountArgument = left(); + } + + return dvrCtx -> datetimes.apply( source(), - toEvaluator.apply(argumentOfType(DataTypes::isDateTime)).get(dvrCtx), - (TemporalAmount) argumentOfType(EsqlDataTypes::isTemporalAmount).fold(), + toEvaluator.apply(datetimeArgument).get(dvrCtx), + (TemporalAmount) temporalAmountArgument.fold(), dvrCtx - ) - : super.toEvaluator(toEvaluator); - } - - private Expression argumentOfType(Predicate filter) { - return filter.test(left().dataType()) ? left() : filter.test(right().dataType()) ? right() : null; + ); + } else { + return super.toEvaluator(toEvaluator); + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java index 5a417134c96fc..d09ae25d91746 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java @@ -104,7 +104,7 @@ ExpressionEvaluator apply( } @Override - public final Object fold() { + public Object fold() { return EvaluatorMapper.super.fold(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java index b00346b8cceb7..ba071c05a15a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.DateTimeException; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -44,7 +46,8 @@ public Sub(Source source, Expression left, Expression right) { @Override protected TypeResolution resolveType() { TypeResolution resolution = super.resolveType(); - if (resolution.resolved() && EsqlDataTypes.isDateTimeOrTemporal(dataType()) && DataTypes.isDateTime(left().dataType()) == false) { + // As opposed to general date time arithmetics, we cannot subtract a datetime from something else. 
+ if (resolution.resolved() && EsqlDataTypes.isDateTimeOrTemporal(dataType()) && DataTypes.isDateTime(right().dataType())) { return new TypeResolution( format( null, @@ -100,4 +103,14 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function return asMillis(asDateTime(datetime).minus(temporalAmount)); } + + @Override + public Period fold(Period left, Period right) { + return left.minus(right); + } + + @Override + public Duration fold(Duration left, Duration right) { + return left.minus(right); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index ff6e7f4aa2736..83dd0ff4ed1c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -15,6 +15,8 @@ import java.util.Collection; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; @@ -63,6 +65,12 @@ public DataType commonType(DataType left, DataType right) { if (isDateTime(left) && isTemporalAmount(right) || isTemporalAmount(left) && isDateTime(right)) { return DataTypes.DATETIME; } + if (left == TIME_DURATION && right == TIME_DURATION) { + return TIME_DURATION; + } + if (left == DATE_PERIOD && right == DATE_PERIOD) { + return DATE_PERIOD; + } return DataTypeConverter.commonType(left, right); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 8740b04298c23..3c1a9800d6d11 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -269,6 +269,21 @@ public void testSubtractDateTimeFromTemporal() { } } + public void testPeriodAndDurationInEval() { + for (var unit : List.of("millisecond", "second", "minute", "hour")) { + assertEquals( + "1:18: EVAL does not support type [time_duration] in expression [1 " + unit + "]", + error("row x = 1 | eval y = 1 " + unit) + ); + } + for (var unit : List.of("day", "week", "month", "year")) { + assertEquals( + "1:18: EVAL does not support type [date_period] in expression [1 " + unit + "]", + error("row x = 1 | eval y = 1 " + unit) + ); + } + } + private String error(String query) { return error(query, defaultAnalyzer); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 017034eba9c64..d7c962ae15a20 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -87,14 +87,14 @@ public static Literal randomLiteral(DataType type) { case "short" -> 
randomShort(); case "integer" -> randomInt(); case "unsigned_long", "long" -> randomLong(); - case "date_period" -> Period.ofDays(randomInt(10)); + case "date_period" -> Period.of(randomIntBetween(-1000, 1000), randomIntBetween(-13, 13), randomIntBetween(-32, 32)); case "datetime" -> randomMillisUpToYear9999(); case "double", "scaled_float" -> randomDouble(); case "float" -> randomFloat(); case "half_float" -> HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(randomFloat())); case "keyword" -> new BytesRef(randomAlphaOfLength(5)); case "ip" -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); - case "time_duration" -> Duration.ofMillis(randomNonNegativeLong()); + case "time_duration" -> Duration.ofNanos(randomLongBetween(-604800000000000L, 604800000000000L)); case "text" -> new BytesRef(randomAlphaOfLength(50)); case "version" -> new Version(randomIdentifier()).toBytesRef(); case "null" -> null; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 805eb3fd557c9..454c8d2ae5a6e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -115,6 +115,18 @@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(rhs).plus(lhs))) ); + }), new TestCaseSupplier("Period + Period", () -> { + Period lhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.DATE_PERIOD, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.DATE_PERIOD, + equalTo(lhs.plus(rhs)) + ); }), new TestCaseSupplier("Datetime + Duration", () -> { long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); @@ -139,6 +151,18 @@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(lhs).plus(rhs))) ); + }), new TestCaseSupplier("Duration + Duration", () -> { + Duration lhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.TIME_DURATION, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.TIME_DURATION, + equalTo(lhs.plus(rhs)) + ); }))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index 11496154f0809..4a9056b0de594 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -103,6 +103,18 
@@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); + }), new TestCaseSupplier("Period - Period", () -> { + Period lhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.DATE_PERIOD, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.DATE_PERIOD, + equalTo(lhs.minus(rhs)) + ); }), new TestCaseSupplier("Datetime - Duration", () -> { long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); @@ -116,6 +128,18 @@ public static Iterable parameters() { equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); return testCase; + }), new TestCaseSupplier("Duration - Duration", () -> { + Duration lhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.TIME_DURATION, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.TIME_DURATION, + equalTo(lhs.minus(rhs)) + ); }))); } From d87aed26bfe3688a3398ad93f4de25c673c13849 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Wed, 20 Sep 2023 13:15:56 +0200 Subject: [PATCH 21/27] [Profiling] Allow to wait until resources created (#99655) With this commit we introduce a new request parameter `wait_for_resources_created` to the profiling status API. This parameter allows callers to wait until all profiling resources have been created (or the request times out). The default behavior remains the same (i.e. the API call returns immediately). 
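In transport-client terms the new behaviour is opt-in per request. A minimal
sketch using the classes from this patch (assuming an internal-cluster
`client`, as in the integration tests below):

    GetStatusAction.Request request = new GetStatusAction.Request();
    request.waitForResourcesCreated(true);
    // bound the wait; on expiry the response reports 408 instead of throwing
    request.timeout(TimeValue.timeValueSeconds(30));

    GetStatusAction.Response response = client.execute(GetStatusAction.INSTANCE, request).actionGet();
    if (response.status() == RestStatus.OK && response.isResourcesCreated()) {
        // all profiling templates, indices and data streams are in place
    }

Over HTTP the same flag is exposed as the `wait_for_resources_created` query
parameter, wired up in RestGetStatusAction below.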
--- docs/changelog/99655.yaml | 5 + .../profiling/GetStackTracesActionIT.java | 5 - .../xpack/profiling/GetStatusActionIT.java | 51 ++++++++ .../xpack/profiling/ProfilingTestCase.java | 14 +- .../xpack/profiling/GetStatusAction.java | 36 +++++- .../xpack/profiling/RestGetStatusAction.java | 5 +- .../profiling/TransportGetStatusAction.java | 122 +++++++++++++++--- 7 files changed, 205 insertions(+), 33 deletions(-) create mode 100644 docs/changelog/99655.yaml create mode 100644 x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml new file mode 100644 index 0000000000000..3d1e76ec47aa3 --- /dev/null +++ b/docs/changelog/99655.yaml @@ -0,0 +1,5 @@ +pr: 99655 +summary: "[Profiling] Allow to wait until resources created" +area: Application +type: enhancement +issues: [] diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 599524915562c..c8ee6d91a1e47 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -10,11 +10,6 @@ import java.util.List; public class GetStackTracesActionIT extends ProfilingTestCase { - @Override - protected boolean useOnlyAllEvents() { - return randomBoolean(); - } - public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1, null); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java new file mode 100644 index 0000000000000..7cd5b08ee773f --- /dev/null +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java @@ -0,0 +1,51 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestStatus; + +public class GetStatusActionIT extends ProfilingTestCase { + @Override + protected boolean requiresDataSetup() { + // We need explicit control whether index template management is enabled, and thus we skip data setup. 
+ return false; + } + + public void testTimeoutIfResourcesNotCreated() throws Exception { + updateProfilingTemplatesEnabled(false); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(true); + // shorter than the default timeout to avoid excessively long execution + request.timeout(TimeValue.timeValueSeconds(15)); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.REQUEST_TIMEOUT, response.status()); + assertFalse(response.isResourcesCreated()); + } + + public void testNoTimeoutIfNotWaiting() throws Exception { + updateProfilingTemplatesEnabled(false); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(false); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.OK, response.status()); + assertFalse(response.isResourcesCreated()); + } + + public void testWaitsUntilResourcesAreCreated() throws Exception { + updateProfilingTemplatesEnabled(true); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(true); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.OK, response.status()); + assertTrue(response.isResourcesCreated()); + } +} diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 240f05f6b4335..f15925b7c891b 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -84,7 +84,16 @@ private void indexDoc(String index, String id, Map source) { * * @return true iff this test should rely on only "profiling-events-all" being present. */ - protected abstract boolean useOnlyAllEvents(); + protected boolean useOnlyAllEvents() { + return randomBoolean(); + } + + /** + * @return true iff this test relies that data (and the corresponding indices / data streams) are present for this test. + */ + protected boolean requiresDataSetup() { + return true; + } protected void waitForIndices() throws Exception { assertBusy(() -> { @@ -110,6 +119,9 @@ protected void updateProfilingTemplatesEnabled(boolean newValue) { @Before public void setupData() throws Exception { + if (requiresDataSetup() == false) { + return; + } // only enable index management while setting up indices to avoid interfering with the rest of the test infrastructure updateProfilingTemplatesEnabled(true); Collection eventsIndices = useOnlyAllEvents() ? 
List.of(EventsIndex.FULL_INDEX.getName()) : EventsIndex.indexNames(); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java index 31540cffef010..8566978decaa8 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java @@ -14,7 +14,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -28,12 +29,13 @@ protected GetStatusAction() { super(NAME, GetStatusAction.Response::new); } - public static class Response extends ActionResponse implements ToXContentObject { + public static class Response extends ActionResponse implements StatusToXContentObject { private boolean profilingEnabled; private boolean resourceManagementEnabled; private boolean resourcesCreated; private boolean pre891Data; + private boolean timedOut; public Response(StreamInput in) throws IOException { super(in); @@ -41,6 +43,7 @@ public Response(StreamInput in) throws IOException { resourceManagementEnabled = in.readBoolean(); resourcesCreated = in.readBoolean(); pre891Data = in.readBoolean(); + timedOut = in.readBoolean(); } public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated, boolean pre891Data) { @@ -50,6 +53,14 @@ public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boo this.pre891Data = pre891Data; } + public void setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + } + + public boolean isResourcesCreated() { + return resourcesCreated; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -66,6 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(resourceManagementEnabled); out.writeBoolean(resourcesCreated); out.writeBoolean(pre891Data); + out.writeBoolean(timedOut); } @Override @@ -76,12 +88,13 @@ public boolean equals(Object o) { return profilingEnabled == response.profilingEnabled && resourceManagementEnabled == response.resourceManagementEnabled && resourcesCreated == response.resourcesCreated - && pre891Data == response.pre891Data; + && pre891Data == response.pre891Data + && timedOut == response.timedOut; } @Override public int hashCode() { - return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data); + return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data, timedOut); } @Override @@ -89,16 +102,30 @@ public String toString() { return Strings.toString(this, true, true); } + @Override + public RestStatus status() { + return timedOut ? 
RestStatus.REQUEST_TIMEOUT : RestStatus.OK; + } } public static class Request extends AcknowledgedRequest { + private boolean waitForResourcesCreated; public Request(StreamInput in) throws IOException { super(in); + waitForResourcesCreated = in.readBoolean(); } public Request() {} + public boolean waitForResourcesCreated() { + return waitForResourcesCreated; + } + + public void waitForResourcesCreated(boolean waitForResourcesCreated) { + this.waitForResourcesCreated = waitForResourcesCreated; + } + @Override public ActionRequestValidationException validate() { return null; @@ -107,6 +134,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeBoolean(waitForResourcesCreated); } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java index c62d6dcad8c1a..714181f3dc0b5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.rest.action.RestStatusToXContentListener; import java.util.List; @@ -33,6 +33,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient GetStatusAction.Request request = new GetStatusAction.Request(); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); - return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); + request.waitForResourcesCreated(restRequest.paramAsBoolean("wait_for_resources_created", false)); + return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestStatusToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java index abac8971596a1..8110cc5e968ec 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java @@ -7,23 +7,33 @@ package org.elasticsearch.xpack.profiling; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; public class TransportGetStatusAction extends TransportMasterNodeAction { + private static final Logger log = LogManager.getLogger(TransportGetStatusAction.class); + + private final StatusResolver resolver; + @Inject public TransportGetStatusAction( TransportService transportService, @@ -43,6 +53,7 @@ public TransportGetStatusAction( GetStatusAction.Response::new, ThreadPool.Names.SAME ); + this.resolver = new StatusResolver(clusterService); } @Override @@ -52,33 +63,102 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - IndexStateResolver indexStateResolver = new IndexStateResolver(getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES)); - - boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); - boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); - - boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings()); - boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver); - boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver); - boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated; - - boolean indicesPre891 = ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver); - boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver); - boolean anyPre891Data = indicesPre891 || dataStreamsPre891; - listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data)); - } - - private boolean getValue(ClusterState state, Setting setting) { - Metadata metadata = state.getMetadata(); - if (metadata.settings().hasValue(setting.getKey())) { - return setting.get(metadata.settings()); + if (request.waitForResourcesCreated()) { + createAndRegisterListener(listener, request.timeout()); } else { - return setting.get(clusterService.getSettings()); + listener.onResponse(resolver.getResponse(state)); } } + private void createAndRegisterListener(ActionListener listener, TimeValue timeout) { + final DiscoveryNode localNode = clusterService.localNode(); + ClusterStateObserver.waitForState( + clusterService, + threadPool.getThreadContext(), + new StatusListener(listener, localNode, clusterService, resolver), + clusterState -> resolver.getResponse(clusterState).isResourcesCreated(), + timeout, + log + ); + } + @Override protected ClusterBlockException checkBlock(GetStatusAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } + + private static class StatusListener implements ClusterStateObserver.Listener { + private final ActionListener listener; + private final DiscoveryNode localNode; + + private final ClusterService clusterService; + + private final StatusResolver resolver; + + private StatusListener( + ActionListener listener, + DiscoveryNode localNode, + ClusterService clusterService, + StatusResolver resolver + ) { + this.listener = listener; + this.localNode = localNode; + this.clusterService = clusterService; + this.resolver = 
resolver; + } + + @Override + public void onNewClusterState(ClusterState state) { + listener.onResponse(resolver.getResponse(state)); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(localNode)); + } + + @Override + public void onTimeout(TimeValue timeout) { + GetStatusAction.Response response = resolver.getResponse(clusterService.state()); + response.setTimedOut(true); + listener.onResponse(response); + } + } + + private static class StatusResolver { + private final ClusterService clusterService; + + private StatusResolver(ClusterService clusterService) { + this.clusterService = clusterService; + } + + private GetStatusAction.Response getResponse(ClusterState state) { + IndexStateResolver indexStateResolver = new IndexStateResolver( + getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES) + ); + + boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); + boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); + + boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings()); + boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver); + boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver); + boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated; + + boolean indicesPre891 = ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver); + boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver); + boolean anyPre891Data = indicesPre891 || dataStreamsPre891; + + return new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data); + } + + private boolean getValue(ClusterState state, Setting setting) { + Metadata metadata = state.getMetadata(); + if (metadata.settings().hasValue(setting.getKey())) { + return setting.get(metadata.settings()); + } else { + return setting.get(clusterService.getSettings()); + } + } + } } From 3a7bdb5838a853761de266c44c39ec097116dbf5 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 20 Sep 2023 13:22:36 +0200 Subject: [PATCH 22/27] Make reroute processor GA (#99531) --- docs/reference/ingest/processors/reroute.asciidoc | 2 -- 1 file changed, 2 deletions(-) diff --git a/docs/reference/ingest/processors/reroute.asciidoc b/docs/reference/ingest/processors/reroute.asciidoc index eb7eb211cd62f..482ff3b1cc116 100644 --- a/docs/reference/ingest/processors/reroute.asciidoc +++ b/docs/reference/ingest/processors/reroute.asciidoc @@ -4,8 +4,6 @@ Reroute ++++ -experimental::[] - The `reroute` processor allows to route a document to another target index or data stream. It has two main modes: From ebd5ead943649a27ab68206d261060ddf184fb07 Mon Sep 17 00:00:00 2001 From: Felix Barnsteiner Date: Wed, 20 Sep 2023 13:59:03 +0200 Subject: [PATCH 23/27] Remove ineffective options of preventing mapping explosions (#99665) Removes the recommendations to use the object field type and to set index: false. Both of these options are not effective with avoiding mapping explosions. 
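The mitigations the page keeps recommending, a `flattened` field for arbitrary
keys plus disabled dynamic mappings, can be sketched against the server-side
create-index API. Treat this as an outline under assumptions: the index and
field names are invented, and CreateIndexRequest#mapping(XContentBuilder) is
assumed to be available:

    // absorb arbitrary sub-keys under one mapped field instead of one field per key
    XContentBuilder mapping = XContentFactory.jsonBuilder()
        .startObject()
        .field("dynamic", false)        // unknown top-level fields no longer grow the mapping
        .startObject("properties")
        .startObject("host")
        .field("type", "flattened")     // one field, however many keys documents send under it
        .endObject()
        .endObject()
        .endObject();

    CreateIndexRequest request = new CreateIndexRequest("logs-arbitrary-keys").mapping(mapping);
    // keep the default ceiling rather than raising it to absorb bad input
    request.settings(Settings.builder().put("index.mapping.total_fields.limit", 1000));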
--- .../common-issues/mapping-explosion.asciidoc | 91 +++++++++---------- 1 file changed, 42 insertions(+), 49 deletions(-) diff --git a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc index 2d5b4f86e3c77..48e9f802e13f8 100644 --- a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc +++ b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc @@ -1,70 +1,63 @@ [[mapping-explosion]] === Mapping explosion -{es}'s search and {kibana-ref}/discover.html[{kib}'s discover] Javascript rendering are -dependent on the search's backing indices total amount of -<>, of all mapping depths. When this total -amount is too high or is exponentially climbing, we refer to it as -experiencing mapping explosion. Field counts going this high are uncommon -and usually suggest an upstream document formatting issue as -link:https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion[shown in this blog]. +{es}'s search and {kibana-ref}/discover.html[{kib}'s discover] Javascript rendering are +dependent on the search's backing indices total amount of +<>, of all mapping depths. When this total +amount is too high or is exponentially climbing, we refer to it as +experiencing mapping explosion. Field counts going this high are uncommon +and usually suggest an upstream document formatting issue as +link:https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion[shown in this blog]. Mapping explosion may surface as the following performance symptoms: -* <> reporting high heap or CPU on the main node -and/or nodes hosting the indices shards. This may potentially +* <> reporting high heap or CPU on the main node +and/or nodes hosting the indices shards. This may potentially escalate to temporary node unresponsiveness and/or main overwhelm. -* <> reporting long search durations only related to -this index or indices, even on simple searches. +* <> reporting long search durations only related to +this index or indices, even on simple searches. -* <> reporting long index durations only related to -this index or indices. This usually relates to <> -reporting that the coordinating node is waiting for all other nodes to +* <> reporting long index durations only related to +this index or indices. This usually relates to <> +reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. -* Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or +* Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or timing out in the browser's Developer Tools Network tab. * Discover's **Available fields** taking a long time to compile Javascript in the browser's Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. -* Kibana's {kibana-ref}/alerting-getting-started.html[alerting] or {security-guide}/detection-engine-overview.html[security rules] may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {kib}'s {kibana-ref}/settings.html#server-maxPayload[`server-maxPayload`]. 
+* Kibana's {kibana-ref}/alerting-getting-started.html[alerting] or {security-guide}/detection-engine-overview.html[security rules] may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {kib}'s {kibana-ref}/settings.html#server-maxPayload[`server-maxPayload`]. -* Long {es} start-up durations. +* Long {es} start-up durations. [discrete] [[prevent]] ==== Prevent or prepare -<> cannot be field-reduced once initialized. -{es} indices default to <> which -doesn't normally cause problems unless it's combined with overriding -<>. The -default `1000` limit is considered generous, though overriding to `10000` -doesn't cause noticable impact depending on use case. However, to give -a bad example, overriding to `100000` and this limit being hit -by mapping totals would usually have strong performance implications. - -If your index mapped fields expect to contain a large, arbitrary set of -keys, you may instead consider: - -* Using the <> data type. Please note, -however, that flattened objects is link:https://github.com/elastic/kibana/issues/25820[not fully supported in {kib}] yet. For example, this could apply to sub-mappings like { `host.name` , -`host.os`, `host.version` }. Desired fields are still accessed by +<> cannot be field-reduced once initialized. +{es} indices default to <> which +doesn't normally cause problems unless it's combined with overriding +<>. The +default `1000` limit is considered generous, though overriding to `10000` +doesn't cause noticeable impact depending on use case. However, to give +a bad example, overriding to `100000` with this limit actually being hit +by mapping totals would usually have strong performance implications. + +If your index's mapped fields are expected to contain a large, arbitrary set of +keys, you may instead consider: + +* Using the <> data type. Please note, +however, that flattened objects are link:https://github.com/elastic/kibana/issues/25820[not fully supported in {kib}] yet. For example, this could apply to sub-mappings like { `host.name`, +`host.os`, `host.version` }. Desired fields are still accessed by <>. -* Using the <>. This is helpful when you're -interested in storing but not searching a group of fields. This is commonly -used for unknown upstream scenarios which may induce however many fields. -For example, this is recommended when sub-mappings start showing new, -unexpected fields like { `o365.a01`, `o365.a02`, `o365.b01`, `o365.c99`}. - -* Setting <> to disable a particular field's -searchability. This cannot effect current index mapping, but can apply -going forward via an <>. +* Disable <>. +This cannot affect the current index mapping, but can apply going forward via an <>. -Modifying to the <> data type would not resolve the core -issue. +Modifying to the <> data type would not resolve the core +issue. [discrete] [[check]] @@ -91,12 +84,12 @@ You can use <> to find fields which [[complex]] ==== Complex explosions -Mapping explosions also covers when an individual index field totals are within limits but combined indices fields totals are very high. It's very common for symptoms to first be noticed on a {kibana-ref}/data-views.html[data view] and be traced back to an individual index or a subset of indices via the +Mapping explosion also covers cases where an individual index's field totals are within limits but the combined indices' field totals are very high.
It's very common for symptoms to first be noticed on a {kibana-ref}/data-views.html[data view] and be traced back to an individual index or a subset of indices via the <>. -However, though less common, it is possible to only experience mapping explosions on the combination of backing indices. For example, if a <>'s backing indices are all at field total limit but each contain unique fields from one another. +However, though less common, it is possible to only experience mapping explosions on the combination of backing indices. For example, if a <>'s backing indices are all at field total limit but each contain unique fields from one another. -This situation most easily surfaces by adding a {kibana-ref}/data-views.html[data view] and checking its **Fields** tab for its total fields count. This statistic does tells you overall fields and not only where <>, but serves as a good baseline. +This situation most easily surfaces by adding a {kibana-ref}/data-views.html[data view] and checking its **Fields** tab for its total fields count. This statistic does tell you overall fields and not only where <>, but serves as a good baseline. If your issue only surfaces via a {kibana-ref}/data-views.html[data view], you may consider this menu's **Field filters** if you're not using <>. Alternatively, you may consider a more targeted index pattern or using a negative pattern to filter-out problematic indices. For example, if `logs-*` has too high a field count because of problematic backing indices `logs-lotsOfFields-*`, then you could update to either `logs-*,-logs-lotsOfFields-*` or `logs-iMeantThisAnyway-*`. @@ -109,12 +102,12 @@ Mapping explosion is not easily resolved, so it is better prevented via the abov * Disable <>. -* <> into an index with a corrected mapping, +* <> into an index with a corrected mapping, either via <> or <>. * If index is unneeded and/or historical, consider <>. -* {logstash-ref}/plugins-inputs-elasticsearch.html[Export] and {logstash-ref}/plugins-outputs-elasticsearch.html[re-import] data into a mapping-corrected index after {logstash-ref}/plugins-filters-prune.html[pruning] +* {logstash-ref}/plugins-inputs-elasticsearch.html[Export] and {logstash-ref}/plugins-outputs-elasticsearch.html[re-import] data into a mapping-corrected index after {logstash-ref}/plugins-filters-prune.html[pruning] problematic fields via Logstash. -<> would not resolve the core issue. +<> would not resolve the core issue. From 0ad8abaf4ab854a8c0005e7ab747c0b9822433ca Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 20 Sep 2023 13:03:54 +0100 Subject: [PATCH 24/27] Fix thread context in getRepositoryData (#99627) Listeners which subscribe to `BlobStoreRepository#repoDataInitialized` are today completed in the thread context of the thread which first triggers the initialization of repository generation tracking, but we must instead capture each listener's own thread context to avoid cross-context pollution.
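As a minimal sketch of the idea behind the fix (hypothetical types standing in for the listener and ThreadContext machinery, not the actual Elasticsearch API): each listener is wrapped at subscription time with the context captured from the subscribing thread, so completing the shared one-shot future on some other thread cannot leak that thread's context into the callback.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class ContextCapturingFuture<T> {
    // Stand-in for the ThreadContext: one string of "context" per thread.
    static final ThreadLocal<String> CONTEXT = new ThreadLocal<>();

    private final List<Consumer<T>> listeners = new ArrayList<>();
    private T result;
    private boolean done;

    synchronized void addListener(Consumer<T> listener) {
        String captured = CONTEXT.get(); // capture the *subscriber's* context now
        Consumer<T> wrapped = value -> {
            String previous = CONTEXT.get();
            CONTEXT.set(captured); // restore the captured context before the callback
            try {
                listener.accept(value);
            } finally {
                CONTEXT.set(previous);
            }
        };
        if (done) {
            wrapped.accept(result); // already completed: run inline, still context-safe
        } else {
            listeners.add(wrapped);
        }
    }

    synchronized void complete(T value) {
        result = value;
        done = true;
        // Runs on the completing thread, but each callback sees its own captured
        // context rather than whatever this thread happens to carry.
        listeners.forEach(l -> l.accept(value));
    }
}

The diff below achieves the same effect by subscribing each listener through the thread pool's thread context (`listenerToSubscribe.addListener(listener, EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.getThreadContext())`), and the new tests assert that a header set before subscribing is visible again when the listener fires.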
--- docs/changelog/99627.yaml | 5 + .../blobstore/BlobStoreRepository.java | 245 ++++++++++-------- .../blobstore/BlobStoreRepositoryTests.java | 62 ++++- .../SingleResultDeduplicatorTests.java | 51 ++++ 4 files changed, 241 insertions(+), 122 deletions(-) create mode 100644 docs/changelog/99627.yaml diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml new file mode 100644 index 0000000000000..84abdf6418dc2 --- /dev/null +++ b/docs/changelog/99627.yaml @@ -0,0 +1,5 @@ +pr: 99627 +summary: Fix thread context in `getRepositoryData` +area: Snapshot/Restore +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c0ef6581db94b..bfa4cc5be7863 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; @@ -68,6 +67,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.ChunkedToXContent; @@ -1788,36 +1788,41 @@ public void getRepositoryData(ActionListener listener) { // master-eligible or not. 
assert clusterService.localNode().isMasterNode() : "should only load repository data on master nodes"; - if (lifecycle.started() == false) { - listener.onFailure(notStartedException()); - return; - } + while (true) { + // retry loop, in case the state changes underneath us somehow - if (latestKnownRepoGen.get() == RepositoryData.CORRUPTED_REPO_GEN) { - listener.onFailure(corruptedStateException(null, null)); - return; - } - final RepositoryData cached = latestKnownRepositoryData.get(); - // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with - // the latest known repository generation - if (bestEffortConsistency == false && cached.getGenId() == latestKnownRepoGen.get()) { - listener.onResponse(cached); - return; - } - if (metadata.generation() == RepositoryData.UNKNOWN_REPO_GEN && isReadOnly() == false) { - logger.debug( - "[{}] loading repository metadata for the first time, trying to determine correct generation and to store " - + "it in the cluster state", - metadata.name() - ); - initializeRepoGenerationTracking(listener); - } else { - logger.trace( - "[{}] loading un-cached repository data with best known repository generation [{}]", - metadata.name(), - latestKnownRepoGen - ); - repoDataLoadDeduplicator.execute(listener); + if (lifecycle.started() == false) { + listener.onFailure(notStartedException()); + return; + } + + if (latestKnownRepoGen.get() == RepositoryData.CORRUPTED_REPO_GEN) { + listener.onFailure(corruptedStateException(null, null)); + return; + } + final RepositoryData cached = latestKnownRepositoryData.get(); + // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with + // the latest known repository generation + if (bestEffortConsistency == false && cached.getGenId() == latestKnownRepoGen.get()) { + listener.onResponse(cached); + return; + } + if (metadata.generation() == RepositoryData.UNKNOWN_REPO_GEN && isReadOnly() == false) { + logger.debug(""" + [{}] loading repository metadata for the first time, trying to determine correct generation and to store it in the \ + cluster state""", metadata.name()); + if (initializeRepoGenerationTracking(listener)) { + return; + } // else there was a concurrent modification, retry from the start + } else { + logger.trace( + "[{}] loading un-cached repository data with best known repository generation [{}]", + metadata.name(), + latestKnownRepoGen + ); + repoDataLoadDeduplicator.execute(listener); + return; + } } } @@ -1826,7 +1831,8 @@ private RepositoryException notStartedException() { } // Listener used to ensure that repository data is only initialized once in the cluster state by #initializeRepoGenerationTracking - private ListenableActionFuture repoDataInitialized; + @Nullable // unless we're in the process of initializing repo-generation tracking + private SubscribableListener repoDataInitialized; /** * Method used to set the current repository generation in the cluster state's {@link RepositoryMetadata} to the latest generation that @@ -1835,103 +1841,120 @@ private RepositoryException notStartedException() { * have a consistent view of the {@link RepositoryData} before any data has been written to the repository. 
* * @param listener listener to resolve with new repository data + * @return {@code true} if this method at least started the initialization process successfully and will eventually complete the + * listener, {@code false} if there was some concurrent state change which prevents us from starting repo generation tracking (typically + * that some other node got there first) and the caller should check again and possibly retry or complete the listener in some other + * way. */ - private void initializeRepoGenerationTracking(ActionListener listener) { + private boolean initializeRepoGenerationTracking(ActionListener listener) { + final SubscribableListener listenerToSubscribe; + final ActionListener listenerToComplete; + synchronized (this) { if (repoDataInitialized == null) { - // double check the generation since we checked it outside the mutex in the caller and it could have changed by a + // double-check the generation since we checked it outside the mutex in the caller and it could have changed by a // concurrent initialization of the repo metadata and just load repository normally in case we already finished the // initialization if (metadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { - getRepositoryData(listener); - return; + return false; // retry } logger.trace("[{}] initializing repository generation in cluster state", metadata.name()); - repoDataInitialized = new ListenableActionFuture<>(); - repoDataInitialized.addListener(listener); - final Consumer onFailure = e -> { - logger.warn( - () -> format("[%s] Exception when initializing repository generation in cluster state", metadata.name()), - e - ); - final ActionListener existingListener; - synchronized (BlobStoreRepository.this) { - existingListener = repoDataInitialized; - repoDataInitialized = null; + repoDataInitialized = listenerToSubscribe = new SubscribableListener<>(); + listenerToComplete = new ActionListener<>() { + private ActionListener acquireAndClearRepoDataInitialized() { + synchronized (BlobStoreRepository.this) { + assert repoDataInitialized == listenerToSubscribe; + repoDataInitialized = null; + return listenerToSubscribe; + } } - existingListener.onFailure(e); - }; - repoDataLoadDeduplicator.execute( - ActionListener.wrap( - repoData -> submitUnbatchedTask( - "set initial safe repository generation [" + metadata.name() + "][" + repoData.getGenId() + "]", - new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState currentState) { - RepositoryMetadata metadata = getRepoMetadata(currentState); - // No update to the repository generation should have occurred concurrently in general except - // for - // extreme corner cases like failing over to an older version master node and back to the - // current - // node concurrently - if (metadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { - throw new RepositoryException( - metadata.name(), - "Found unexpected initialized repo metadata [" + metadata + "]" - ); - } - return ClusterState.builder(currentState) - .metadata( - Metadata.builder(currentState.getMetadata()) - .putCustom( - RepositoriesMetadata.TYPE, - RepositoriesMetadata.get(currentState) - .withUpdatedGeneration(metadata.name(), repoData.getGenId(), repoData.getGenId()) - ) - ) - .build(); - } - @Override - public void onFailure(Exception e) { - onFailure.accept(e); - } + @Override + public void onResponse(RepositoryData repositoryData) { + acquireAndClearRepoDataInitialized().onResponse(repositoryData); + } - @Override - public void clusterStateProcessed(ClusterState 
oldState, ClusterState newState) { - logger.trace( - "[{}] initialized repository generation in cluster state to [{}]", - metadata.name(), - repoData.getGenId() - ); - // Resolve listeners on generic pool since some callbacks for repository data do additional IO - threadPool.generic().execute(() -> { - final ActionListener existingListener; - synchronized (BlobStoreRepository.this) { - existingListener = repoDataInitialized; - repoDataInitialized = null; - } - existingListener.onResponse(repoData); - logger.trace( - "[{}] called listeners after initializing repository to generation [{}]", - metadata.name(), - repoData.getGenId() - ); - }); - } - } - ), - onFailure - ) - ); + @Override + public void onFailure(Exception e) { + logger.warn( + () -> format("[%s] Exception when initializing repository generation in cluster state", metadata.name()), + e + ); + acquireAndClearRepoDataInitialized().onFailure(e); + } + }; } else { logger.trace( "[{}] waiting for existing initialization of repository metadata generation in cluster state", metadata.name() ); - repoDataInitialized.addListener(listener); - } + listenerToComplete = null; + listenerToSubscribe = repoDataInitialized; + } + } + + if (listenerToComplete != null) { + SubscribableListener + // load the current repository data + .newForked(repoDataLoadDeduplicator::execute) + // write its generation to the cluster state + .andThen( + (l, repoData) -> submitUnbatchedTask( + "set initial safe repository generation [" + metadata.name() + "][" + repoData.getGenId() + "]", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return getClusterStateWithUpdatedRepositoryGeneration(currentState, repoData); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + l.onResponse(repoData); + } + } + ) + ) + // fork to generic pool since we're on the applier thread and some callbacks for repository data do additional IO + .andThen((l, repoData) -> { + logger.trace("[{}] initialized repository generation in cluster state to [{}]", metadata.name(), repoData.getGenId()); + threadPool.generic().execute(ActionRunnable.supply(ActionListener.runAfter(l, () -> { + logger.trace( + "[{}] called listeners after initializing repository to generation [{}]", + metadata.name(), + repoData.getGenId() + ); + }), () -> repoData)); + }) + // and finally complete the listener + .addListener(listenerToComplete); } + + listenerToSubscribe.addListener(listener, EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.getThreadContext()); + return true; + } + + private ClusterState getClusterStateWithUpdatedRepositoryGeneration(ClusterState currentState, RepositoryData repoData) { + // In theory we might have failed over to a different master which initialized the repo and then failed back to this node, so we + // must check the repository generation in the cluster state is still unknown here. 
+ final RepositoryMetadata repoMetadata = getRepoMetadata(currentState); + if (repoMetadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { + throw new RepositoryException(repoMetadata.name(), "Found unexpected initialized repo metadata [" + repoMetadata + "]"); + } + return ClusterState.builder(currentState) + .metadata( + Metadata.builder(currentState.getMetadata()) + .putCustom( + RepositoriesMetadata.TYPE, + RepositoriesMetadata.get(currentState) + .withUpdatedGeneration(repoMetadata.name(), repoData.getGenId(), repoData.getGenId()) + ) + ) + .build(); } /** diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 78f1e6c46956e..624ad6a9fc7da 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -9,8 +9,10 @@ package org.elasticsearch.repositories.blobstore; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -24,6 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexVersion; @@ -36,6 +39,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.SnapshotShardContext; @@ -46,6 +50,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.junit.After; import java.io.IOException; import java.nio.file.Path; @@ -55,7 +60,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -75,6 +82,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { static final String REPO_TYPE = "fsLike"; + private static final String TEST_REPO_NAME = "test-repo"; protected Collection> getPlugins() { return Arrays.asList(FsLikeRepoPlugin.class); @@ -106,12 +114,11 @@ protected void assertSnapshotOrGenericThread() { public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; logger.info("--> creating repository"); AcknowledgedResponse putRepositoryResponse = 
client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); @@ -131,7 +138,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> create first snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "test-snap-1") + .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-1") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -140,7 +147,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> create second snapshot"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "test-snap-2") + .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-2") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -148,7 +155,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> make sure the node's repository can resolve the snapshots"); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(TEST_REPO_NAME); final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2); List snapshotIds = ESBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) @@ -255,13 +262,12 @@ public void testRepositoryDataConcurrentModificationNotAllowed() throws Exceptio public void testBadChunksize() throws Exception { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; expectThrows( RepositoryException.class, () -> client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings( Settings.builder() @@ -345,7 +351,6 @@ private static void writeIndexGen(BlobStoreRepository repository, RepositoryData private BlobStoreRepository setupRepo() { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; Settings.Builder repoSettings = Settings.builder().put(node().settings()).put("location", location); boolean compress = randomBoolean(); @@ -354,20 +359,29 @@ private BlobStoreRepository setupRepo() { } AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(repoSettings) .setVerify(false) // prevent eager reading of repo data - .get(); + .get(TimeValue.timeValueSeconds(10)); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(TEST_REPO_NAME); assertThat("getBlobContainer has to be lazy initialized", repository.getBlobContainer(), nullValue()); assertEquals("Compress must be set to", compress, repository.isCompress()); return repository; } + @After + public void removeRepo() { + try { + 
client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME).get(TimeValue.timeValueSeconds(10)); + } catch (RepositoryMissingException e) { + // ok, not all tests create the test repo + } + } + private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boolean inclIndices) { int numSnapshots = randomIntBetween(1, 20); for (int i = 0; i < numSnapshots; i++) { @@ -441,6 +455,32 @@ protected void snapshotFile(SnapshotShardContext context, BlobStoreIndexShardSna listenerCalled.get(); } + public void testGetRepositoryDataThreadContext() { + final var future = new PlainActionFuture(); + try (var listeners = new RefCountingListener(future)) { + final var repo = setupRepo(); + final int threads = between(1, 5); + final var barrier = new CyclicBarrier(threads); + final var headerName = "test-header"; + final var threadPool = client().threadPool(); + final var threadContext = threadPool.getThreadContext(); + for (int i = 0; i < threads; i++) { + final var headerValue = randomAlphaOfLength(10); + try (var ignored = threadContext.stashContext()) { + threadContext.putHeader(headerName, headerValue); + threadPool.generic().execute(ActionRunnable.wrap(listeners.acquire(), l -> { + safeAwait(barrier); + repo.getRepositoryData(l.map(repositoryData -> { + assertEquals(headerValue, threadContext.getHeader(headerName)); + return null; + })); + })); + } + } + } + future.actionGet(10, TimeUnit.SECONDS); + } + private Environment createEnvironment() { Path home = createTempDir(); return TestEnvironment.newEnvironment( diff --git a/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java b/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java index 56bfe72241f28..fb4c9df512a5a 100644 --- a/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java @@ -10,10 +10,20 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; public class SingleResultDeduplicatorTests extends ESTestCase { @@ -74,4 +84,45 @@ public void onFailure(Exception e) { assertTrue(called[i]); } } + + public void testThreadContextPreservation() { + final var resources = new Releasable[1]; + try { + final var future = new PlainActionFuture(); + try (var listeners = new RefCountingListener(future)) { + final var threadContext = new ThreadContext(Settings.EMPTY); + final var deduplicator = new SingleResultDeduplicator(threadContext, l -> l.onResponse(null)); + final var threads = between(1, 5); + final var executor = EsExecutors.newFixed( + "test", + threads, + 0, + EsExecutors.daemonThreadFactory("test"), + threadContext, + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + resources[0] = () -> ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + final var barrier 
= new CyclicBarrier(threads); + final var headerName = "test-header"; + for (int i = 0; i < threads; i++) { + try (var ignored = threadContext.stashContext()) { + final var headerValue = randomAlphaOfLength(10); + threadContext.putHeader(headerName, headerValue); + executor.execute( + ActionRunnable.wrap( + listeners.acquire(v -> assertEquals(headerValue, threadContext.getHeader(headerName))), + listener -> { + safeAwait(barrier); + deduplicator.execute(listener); + } + ) + ); + } + } + } + future.actionGet(10, TimeUnit.SECONDS); + } finally { + Releasables.closeExpectNoException(resources); + } + } } From cdedf5387b5f27f780a2cd517062fbafa7ef3ed5 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 20 Sep 2023 13:46:13 +0100 Subject: [PATCH 25/27] [ML] Adds parsing tests for the ELSER model service (#99693) --- .../services/elser/ElserMlNodeService.java | 15 +- .../elser/ElserMlNodeServiceTests.java | 128 ++++++++++++++++++ 2 files changed, 138 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java index 7c542e8acd22b..602048f2e3e76 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java @@ -44,12 +44,19 @@ public static ElserMlNodeModel parseConfig( Map settings ) { Map serviceSettingsMap = removeFromMapOrThrowIfNull(settings, Model.SERVICE_SETTINGS); - Map taskSettingsMap = removeFromMapOrThrowIfNull(settings, Model.TASK_SETTINGS); - var serviceSettings = serviceSettingsFromMap(serviceSettingsMap); + + Map taskSettingsMap; + // task settings are optional + if (settings.containsKey(Model.TASK_SETTINGS)) { + taskSettingsMap = removeFromMapOrThrowIfNull(settings, Model.TASK_SETTINGS); + } else { + taskSettingsMap = Map.of(); + } + var taskSettings = taskSettingsFromMap(taskType, taskSettingsMap); - if (throwOnUnknownFields == false) { + if (throwOnUnknownFields) { throwIfNotEmptyMap(settings); throwIfNotEmptyMap(serviceSettingsMap); throwIfNotEmptyMap(taskSettingsMap); @@ -133,8 +140,6 @@ private static ElserMlNodeTaskSettings taskSettingsFromMap(TaskType taskType, Ma } // no config options yet - throwIfNotEmptyMap(config); - return ElserMlNodeTaskSettings.DEFAULT; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java index 008e6a8c17653..bdbb4c545900c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java @@ -7,10 +7,18 @@ package org.elasticsearch.xpack.inference.services.elser; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.Model; import org.elasticsearch.xpack.inference.TaskType; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; + public class ElserMlNodeServiceTests extends 
ESTestCase { public static Model randomModelConfig(String modelId, TaskType taskType) { @@ -25,4 +33,124 @@ public static Model randomModelConfig(String modelId, TaskType taskType) { default -> throw new IllegalArgumentException("task type " + taskType + " is not supported"); }; } + + public void testParseConfigStrict() { + var service = new ElserMlNodeService(mock(Client.class)); + + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of()); + + ElserMlNodeModel parsedModel = service.parseConfigStrict("foo", TaskType.SPARSE_EMBEDDING, settings); + + assertEquals( + new ElserMlNodeModel( + "foo", + TaskType.SPARSE_EMBEDDING, + ElserMlNodeService.NAME, + new ElserMlNodeServiceSettings(1, 4), + ElserMlNodeTaskSettings.DEFAULT + ), + parsedModel + ); + } + + public void testParseConfigStrictWithNoTaskSettings() { + var service = new ElserMlNodeService(mock(Client.class)); + + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + + ElserMlNodeModel parsedModel = service.parseConfigStrict("foo", TaskType.SPARSE_EMBEDDING, settings); + + assertEquals( + new ElserMlNodeModel( + "foo", + TaskType.SPARSE_EMBEDDING, + ElserMlNodeService.NAME, + new ElserMlNodeServiceSettings(1, 4), + ElserMlNodeTaskSettings.DEFAULT + ), + parsedModel + ); + } + + public void testParseConfigStrictWithUnknownSettings() { + + for (boolean throwOnUnknown : new boolean[] { true, false }) { + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of()); + settings.put("foo", "bar"); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of("foo", "bar")); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>( + Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4, "foo", "bar") + ) + ); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, 
settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + } + } } From a5d07ee51f980942a794300636c018d0da1da5b1 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 20 Sep 2023 06:15:04 -0700 Subject: [PATCH 26/27] Make transport and index versions easier to extend (#99688) When overriding transport and index versions, it is difficult to decide whether a version constant in serverless should be returned, or the latest version constant from server should be used, since the latest from server is not available. This commit adjusts the version extension methods to pass in the latest version constants from server. It also tweaks module and method visibility for a helper method that is needed. --- server/src/main/java/module-info.java | 6 +++++- .../main/java/org/elasticsearch/TransportVersion.java | 2 +- .../main/java/org/elasticsearch/TransportVersions.java | 2 +- .../java/org/elasticsearch/index/IndexVersion.java | 2 +- .../org/elasticsearch/internal/VersionExtension.java | 10 ++++++---- 5 files changed, 14 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 472707babf155..95749784afc45 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -279,7 +279,11 @@ exports org.elasticsearch.indices.recovery.plan; exports org.elasticsearch.indices.store; exports org.elasticsearch.ingest; - exports org.elasticsearch.internal to org.elasticsearch.serverless.version, org.elasticsearch.serverless.buildinfo; + exports org.elasticsearch.internal + to + org.elasticsearch.serverless.version, + org.elasticsearch.serverless.buildinfo, + org.elasticsearch.serverless.constants; exports org.elasticsearch.lucene.analysis.miscellaneous; exports org.elasticsearch.lucene.grouping; exports org.elasticsearch.lucene.queries; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 2aae6befb673a..92bb88f16385d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -115,7 +115,7 @@ private static TransportVersion findCurrent() { if (versionExtension == null) { return TransportVersions.LATEST_DEFINED; } - var version = versionExtension.getCurrentTransportVersion(); + var version = versionExtension.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED); assert version.onOrAfter(TransportVersions.LATEST_DEFINED); return version; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index d9c0bc59c06f9..5c28fa17b7b3e 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -217,7 +217,7 @@ static TransportVersion def(int id) { IDS = null; } - static NavigableMap getAllVersionIds(Class cls) { + public static NavigableMap getAllVersionIds(Class cls) { Map versionIdFields = new HashMap<>(); NavigableMap builder = new TreeMap<>(); diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 49e2172b2f4b2..1cb03574afd86 100644 ---
a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -160,7 +160,7 @@ private static IndexVersion findCurrent() { if (versionExtension == null) { return LATEST_DEFINED; } - var version = versionExtension.getCurrentIndexVersion(); + var version = versionExtension.getCurrentIndexVersion(LATEST_DEFINED); assert version.onOrAfter(LATEST_DEFINED); assert version.luceneVersion.equals(Version.LATEST) diff --git a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java index 83974b3b65158..9c1b515df4043 100644 --- a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java @@ -18,14 +18,16 @@ public interface VersionExtension { /** * Returns the {@link TransportVersion} that Elasticsearch should use. * <p>
- * This must be at least equal to the latest version found in {@link TransportVersion} V_* constants. + * This must be at least as high as the given fallback. + * @param fallback The latest transport version from server */ - TransportVersion getCurrentTransportVersion(); + TransportVersion getCurrentTransportVersion(TransportVersion fallback); /** * Returns the {@link IndexVersion} that Elasticsearch should use. * <p>
- * This must be at least equal to the latest version found in {@link IndexVersion} V_* constants. + * This must be at least as high as the given fallback. + * @param fallback The latest index version from server */ - IndexVersion getCurrentIndexVersion(); + IndexVersion getCurrentIndexVersion(IndexVersion fallback); } From d2797910435d2273eb41bdb95c10dbd9f5e4e125 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Wed, 20 Sep 2023 15:42:54 +0200 Subject: [PATCH 27/27] Run SignificanceLookup BackgroundFrequency query in its own IndexSearcher (#99704) Use a brand new index searcher so the query runs on the current thread. --- .../bucket/SignificantTermsSignificanceScoreIT.java | 1 - .../search/aggregations/bucket/terms/SignificanceLookup.java | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index 24fd711d18a72..c8d89785fc4af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -472,7 +472,6 @@ private void indexEqualTestData() throws ExecutionException, InterruptedExceptio indexRandom(true, false, indexRequestBuilders); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99690") public void testScriptScore() throws ExecutionException, InterruptedException, IOException { String type = randomBoolean() ? "text" : "long"; indexRandomFrequencies01(type); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java index a71d26061752e..9ac9c0e241566 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -233,7 +233,8 @@ private long getBackgroundFrequency(Query query) throws IOException { if (backgroundFilter != null) { query = new BooleanQuery.Builder().add(query, Occur.FILTER).add(backgroundFilter, Occur.FILTER).build(); } - return context.searcher().count(query); + // use a brand new index searcher as we want to run this query on the current thread + return new IndexSearcher(context.searcher().getIndexReader()).count(query); } private TermsEnum getTermsEnum(String field) throws IOException {