From 2136af2846b4d07244d5e4debe8c884e2f451966 Mon Sep 17 00:00:00 2001 From: weizijun Date: Mon, 22 Nov 2021 21:06:37 +0800 Subject: [PATCH 01/88] Fix time series timestamp meta missing (#80695) I found a case where the timestamp field meta of a time series index goes missing. Here are the steps to reproduce: 1. Create a time_series index and set some meta on the timestamp field. 2. Index a doc with a new field that is not in the mappings; this triggers a mapping merge. 3. The timestamp field meta is now missing. The meta goes missing because, when a new field arrives, `MappingParser.parse` builds a new mapping containing the new fields and merges it with the existing mapping. The new mapping has no timestamp field, so a timestamp field is added automatically, without the user's meta info. The merge then builds a new timestamp field that overrides the user's timestamp field, dropping the meta. I fixed this by moving the timestamp logic from MappingParser.parse into the create-index logic, and I moved the tests to a new IT test. I also added a test for this case; TimeSeriesModeIT.testAddTimeStampMeta fails in pre-commit without this fix. --- .../test/tsdb/15_timestamp_mapping.yml | 2 +- .../elasticsearch/index/TimeSeriesModeIT.java | 598 ++++++++++++++++++ .../metadata/MetadataCreateIndexService.java | 14 +- .../org/elasticsearch/index/IndexMode.java | 91 ++- .../index/mapper/MappingParser.java | 9 +- .../index/TimeSeriesModeTests.java | 241 ------- 6 files changed, 651 insertions(+), 304 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml index 9d61d4c359b6d..7ac5bc4be4d7b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -164,7 +164,7 @@ reject timestamp meta field with wrong type: reason: introduced in 8.0.0 to be backported to 7.16.0 - do: - catch: /.* time series index \[_data_stream_timestamp\] meta field must be enabled/ + catch: /\[_data_stream_timestamp\] meta field has been disabled/ indices.create: index: test body: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java new file mode 100644 index 0000000000000..74f58fe2364ae --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java @@ -0,0 +1,598 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.index; + +import org.elasticsearch.action.DocWriteResponse.Result; +import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; +import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; +import org.elasticsearch.index.mapper.MapperParsingException; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; + +import java.io.IOException; +import java.util.Locale; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class TimeSeriesModeIT extends ESIntegTestCase { + public void testDisabledTimeStampMapper() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + XContentBuilder mappings = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject(DataStreamTimestampFieldMapper.NAME) + .field("enabled", false) + .endObject() + .endObject() + .endObject(); + + Exception e = expectThrows(IllegalStateException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); + assertThat(e.getMessage(), equalTo("[_data_stream_timestamp] meta field has been disabled")); + } + + public void testBadTimeStampMapper() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + XContentBuilder mappings = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .field(DataStreamTimestampFieldMapper.NAME, "enabled") + .endObject() + .endObject(); + + Exception e = expectThrows(MapperParsingException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); + assertThat(e.getMessage(), equalTo("Failed to parse mapping: [_data_stream_timestamp] config must be an object")); + } + + public void testBadTimestamp() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + String type = randomFrom("keyword", "integer", "long", "double", "text"); + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", type); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + Exception e = expectThrows(IllegalArgumentException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); + assertThat( + e.getMessage(), + equalTo("data stream timestamp field [@timestamp] is of type [" + type + "], but [date,date_nanos] is expected") + ); + } + + public void testAddsTimestamp() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).get(); + 
ensureGreen(index); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); + assertThat(getMappingsResponse.getMappings().size(), equalTo(1)); + + XContentBuilder expect = XContentFactory.jsonBuilder(); + expect.startObject(); + { + expect.startObject("_doc"); + { + expect.startObject(DataStreamTimestampFieldMapper.NAME); + { + expect.field("enabled", true); + } + expect.endObject(); + expect.startObject("properties"); + { + expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + expect.field("type", "date"); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); + } + + public void testTimestampMillis() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", "date"); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); + XContentBuilder expect = XContentFactory.jsonBuilder(); + expect.startObject(); + { + expect.startObject("_doc"); + { + expect.startObject(DataStreamTimestampFieldMapper.NAME); + { + expect.field("enabled", true); + } + expect.endObject(); + expect.startObject("properties"); + { + expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + expect.field("type", "date"); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); + } + + public void testTimestampNanos() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", "date_nanos"); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); + XContentBuilder expect = XContentFactory.jsonBuilder(); + expect.startObject(); + { + expect.startObject("_doc"); + { + expect.startObject(DataStreamTimestampFieldMapper.NAME); + { + expect.field("enabled", true); + } + expect.endObject(); + expect.startObject("properties"); + { + expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + expect.field("type", "date_nanos"); + } + 
expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); + } + + public void testWithoutTimestamp() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", "date"); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> index(index, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()) + ); + assertThat(e.getRootCause().getMessage(), containsString("data stream timestamp field [@timestamp] is missing")); + } + + public void testEnableTimestampRange() throws IOException { + long endTime = System.currentTimeMillis(); + long startTime = endTime - TimeUnit.DAYS.toMillis(1); + + Settings s = Settings.builder() + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", randomBoolean() ? "date" : "date_nanos"); + } + mappings.endObject(); + mappings.startObject("foo"); + { + mappings.field("type", "keyword"); + mappings.field("time_series_dimension", true); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + IndexResponse indexResponse = index( + index, + XContentFactory.jsonBuilder() + .startObject() + .field("foo", "bar") + .field("@timestamp", randomLongBetween(startTime, endTime)) + .endObject() + ); + assertEquals(indexResponse.getResult(), Result.CREATED); + } + + public void testBadStartTime() throws IOException { + long endTime = System.currentTimeMillis(); + long startTime = endTime - TimeUnit.DAYS.toMillis(1); + + Settings s = Settings.builder() + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", randomBoolean() ? 
"date" : "date_nanos"); + } + mappings.endObject(); + mappings.startObject("foo"); + { + mappings.field("type", "keyword"); + mappings.field("time_series_dimension", true); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> index( + index, + XContentFactory.jsonBuilder() + .startObject() + .field("foo", "bar") + .field("@timestamp", Math.max(startTime - randomLongBetween(1, 3), 0)) + .endObject() + ) + ); + assertThat(e.getRootCause().getMessage(), containsString("must be larger than")); + } + + public void testBadEndTime() throws IOException { + long endTime = System.currentTimeMillis(); + long startTime = endTime - TimeUnit.DAYS.toMillis(1); + + Settings s = Settings.builder() + .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) + .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", randomBoolean() ? "date" : "date_nanos"); + } + mappings.endObject(); + mappings.startObject("foo"); + { + mappings.field("type", "keyword"); + mappings.field("time_series_dimension", true); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + MapperParsingException e = expectThrows( + MapperParsingException.class, + () -> index( + index, + XContentFactory.jsonBuilder() + .startObject() + .field("foo", "bar") + .field("@timestamp", endTime + randomLongBetween(0, 3)) + .endObject() + ) + ); + assertThat(e.getRootCause().getMessage(), containsString("must be smaller than")); + } + + public void testEnabledTimeStampMapper() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + XContentBuilder mappings = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject(DataStreamTimestampFieldMapper.NAME); + if (randomBoolean()) { + mappings.field("enabled", true); + } else { + mappings.field("enabled", "true"); + } + mappings.endObject().endObject().endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); + XContentBuilder expect = XContentFactory.jsonBuilder(); + expect.startObject(); + { + expect.startObject("_doc"); + { + expect.startObject(DataStreamTimestampFieldMapper.NAME); + { + expect.field("enabled", true); + } + expect.endObject(); + expect.startObject("properties"); + { + expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + expect.field("type", "date"); + } + expect.endObject(); + } + expect.endObject(); + } + 
expect.endObject(); + } + expect.endObject(); + assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); + } + + public void testAddTimeStampMeta() throws IOException { + Settings s = Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") + .build(); + + XContentBuilder mappings = XContentFactory.jsonBuilder(); + mappings.startObject(); + { + mappings.startObject("_doc"); + { + mappings.startObject(DataStreamTimestampFieldMapper.NAME); + { + mappings.field("enabled", true); + } + mappings.endObject(); + mappings.startObject("properties"); + { + mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + mappings.field("type", "date"); + mappings.startObject("meta"); + { + mappings.field("field_meta", "time_series"); + } + mappings.endObject(); + } + mappings.endObject(); + mappings.startObject("foo"); + { + mappings.field("type", "keyword"); + mappings.field("time_series_dimension", true); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + } + mappings.endObject(); + + String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + prepareCreate(index).setSettings(s).setMapping(mappings).get(); + ensureGreen(index); + + IndexResponse indexResponse = index( + index, + XContentFactory.jsonBuilder() + .startObject() + .field("foo", "bar") + .field("@timestamp", System.currentTimeMillis()) + .field("new_field", "value") + .endObject() + ); + assertEquals(indexResponse.getResult(), Result.CREATED); + + XContentBuilder expect = XContentFactory.jsonBuilder(); + expect.startObject(); + { + expect.startObject("_doc"); + { + expect.startObject(DataStreamTimestampFieldMapper.NAME); + { + expect.field("enabled", true); + } + expect.endObject(); + expect.startObject("properties"); + { + expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); + { + expect.field("type", "date"); + expect.startObject("meta"); + { + expect.field("field_meta", "time_series"); + } + expect.endObject(); + } + expect.endObject(); + expect.startObject("foo"); + { + expect.field("type", "keyword"); + expect.field("time_series_dimension", true); + } + expect.endObject(); + expect.startObject("new_field"); + { + expect.field("type", "text"); + expect.startObject("fields"); + { + expect.startObject("keyword"); + { + expect.field("type", "keyword"); + expect.field("ignore_above", 256); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + } + expect.endObject(); + GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); + assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); + } + +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index b0e6ca7e6f0ba..ab0893aeaf5d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -51,6 +51,7 @@ import org.elasticsearch.core.PathUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; 
import org.elasticsearch.index.IndexService; @@ -100,7 +101,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.validateTimestampFieldMapping; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.resolveSettings; import static org.elasticsearch.index.IndexModule.INDEX_RECOVERY_TYPE_SETTING; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; @@ -1195,18 +1195,23 @@ private static ClusterBlocks.Builder createClusterBlocksBuilder(ClusterState cur return blocksBuilder; } - private static void updateIndexMappingsAndBuildSortOrder( + private void updateIndexMappingsAndBuildSortOrder( IndexService indexService, CreateIndexClusterStateUpdateRequest request, List<Map<String, Object>> mappings, @Nullable IndexMetadata sourceMetadata ) throws IOException { MapperService mapperService = indexService.mapperService(); - for (Map<String, Object> mapping : mappings) { + IndexMode indexMode = indexService.getIndexSettings() != null ? indexService.getIndexSettings().getMode() : IndexMode.STANDARD; + List<Map<String, Object>> mergedMappings = new ArrayList<>(1 + mappings.size()); + mergedMappings.add(indexMode.getDefaultMapping()); + mergedMappings.addAll(mappings); + for (Map<String, Object> mapping : mergedMappings) { if (mapping.isEmpty() == false) { mapperService.merge(MapperService.SINGLE_MAPPING_NAME, mapping, MergeReason.INDEX_TEMPLATE); } } + indexMode.validateTimestampFieldMapping(request.dataStreamName() != null, mapperService.mappingLookup()); if (sourceMetadata == null) { // now that the mapping is merged we can validate the index sort. @@ -1215,9 +1220,6 @@ private static void updateIndexMappingsAndBuildSortOrder( // (when all shards are copied in a single place). 
indexService.getIndexSortSupplier().get(); } - if (request.dataStreamName() != null) { - validateTimestampFieldMapping(mapperService.mappingLookup()); - } } private static void validateActiveShardCount(ActiveShardCount waitForActiveShards, IndexMetadata indexMetadata) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 7c672d65632c3..0107a1ad817d8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -9,23 +9,21 @@ package org.elasticsearch.index; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.Mapper; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; -import org.elasticsearch.index.mapper.MappingParserContext; -import org.elasticsearch.index.mapper.RootObjectMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; -import java.util.HashMap; +import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Optional; import java.util.stream.Stream; import static java.util.stream.Collectors.toSet; @@ -59,7 +57,16 @@ private void settingRequiresTimeSeries(Map<Setting<?>, Object> settings, Setting public void validateAlias(@Nullable String indexRouting, @Nullable String searchRouting) {} @Override - public void completeMappings(MappingParserContext context, Map<String, Object> mapping, RootObjectMapper.Builder builder) {} + public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) throws IOException { + if (isDataStream) { + MetadataCreateDataStreamService.validateTimestampFieldMapping(mappingLookup); + } + } + + @Override + public Map<String, Object> getDefaultMapping() { + return Collections.emptyMap(); + } }, TIME_SERIES { @Override @@ -100,55 +107,35 @@ public void validateAlias(@Nullable String indexRouting, @Nullable String search } } - private String routingRequiredBad() { - return "routing is forbidden on CRUD operations that target indices in " + tsdbMode(); - } - - private String tsdbMode() { - return "[" + IndexSettings.MODE.getKey() + "=time_series]"; + @Override + public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) throws IOException { + MetadataCreateDataStreamService.validateTimestampFieldMapping(mappingLookup); } @Override - public void completeMappings(MappingParserContext context, Map<String, Object> mapping, RootObjectMapper.Builder builder) { - if (false == mapping.containsKey(DataStreamTimestampFieldMapper.NAME)) { - mapping.put(DataStreamTimestampFieldMapper.NAME, new HashMap<>(Map.of("enabled", true))); - } else { - validateTimeStampField(mapping.get(DataStreamTimestampFieldMapper.NAME)); - } - - Optional<Mapper.Builder> timestamp = builder.getBuilder(DataStreamTimestampFieldMapper.DEFAULT_PATH); - if (timestamp.isEmpty()) { - builder.add( - new DateFieldMapper.Builder( - DataStreamTimestampFieldMapper.DEFAULT_PATH, - DateFieldMapper.Resolution.MILLISECONDS, - DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, - 
context.scriptCompiler(), - DateFieldMapper.IGNORE_MALFORMED_SETTING.get(context.getSettings()), - context.getIndexSettings().getIndexVersionCreated() - ) - ); - } + public Map<String, Object> getDefaultMapping() { + return DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING; } - private void validateTimeStampField(Object timestampFieldValue) { - if (false == (timestampFieldValue instanceof Map)) { - throw new IllegalArgumentException( - "time series index [" + DataStreamTimestampFieldMapper.NAME + "] meta field format error" - ); - } + private String routingRequiredBad() { + return "routing is forbidden on CRUD operations that target indices in " + tsdbMode(); + } - @SuppressWarnings("unchecked") - Map<String, Object> timeStampFieldValueMap = (Map<String, Object>) timestampFieldValue; - if (false == Maps.deepEquals(timeStampFieldValueMap, Map.of("enabled", true)) - && false == Maps.deepEquals(timeStampFieldValueMap, Map.of("enabled", "true"))) { - throw new IllegalArgumentException( - "time series index [" + DataStreamTimestampFieldMapper.NAME + "] meta field must be enabled" - ); - } + private String tsdbMode() { + return "[" + IndexSettings.MODE.getKey() + "=time_series]"; } }; + public static final Map<String, Object> DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING = Map.of( + MapperService.SINGLE_MAPPING_NAME, + Map.of( + DataStreamTimestampFieldMapper.NAME, + Map.of("enabled", true), + "properties", + Map.of(DataStreamTimestampFieldMapper.DEFAULT_PATH, Map.of("type", DateFieldMapper.CONTENT_TYPE)) + ) + ); + private static final List<Setting<?>> TIME_SERIES_UNSUPPORTED = List.of( IndexSortConfig.INDEX_SORT_FIELD_SETTING, IndexSortConfig.INDEX_SORT_ORDER_SETTING, @@ -181,7 +168,13 @@ private void validateTimeStampField(Object timestampFieldValue) { public abstract void validateAlias(@Nullable String indexRouting, @Nullable String searchRouting); /** - * Validate and/or modify the mappings after after they've been parsed. + * Validate the timestamp field mapping for this index. + */ + public abstract void validateTimestampFieldMapping(boolean isDataStream, MappingLookup mappingLookup) throws IOException; + + /** + * Get the default mapping for this index. 
+ * @return the default mapping + */ - public abstract void completeMappings(MappingParserContext context, Map<String, Object> mapping, RootObjectMapper.Builder builder); + public abstract Map<String, Object> getDefaultMapping(); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index 33c06221b43cd..19e18efaf24e0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -96,8 +96,7 @@ Mapping parse(@Nullable String type, CompressedXContent source) throws MapperPar private Mapping parse(String type, Map<String, Object> mapping) throws MapperParsingException { MappingParserContext parserContext = parserContextSupplier.get(); - RootObjectMapper.Builder rootObjectMapperBuilder = rootObjectTypeParser.parse(type, mapping, parserContext); - parserContext.getIndexSettings().getMode().completeMappings(parserContext, mapping, rootObjectMapperBuilder); + RootObjectMapper rootObjectMapper = rootObjectTypeParser.parse(type, mapping, parserContext).build(MapperBuilderContext.ROOT); Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = metadataMappersSupplier.get(); Map<String, Object> meta = null; @@ -145,10 +144,6 @@ private Mapping parse(String type, Map<String, Object> mapping) throws MapperPar } checkNoRemainingFields(mapping, "Root mapping definition has unsupported parameters: "); - return new Mapping( - rootObjectMapperBuilder.build(MapperBuilderContext.ROOT), - metadataMappers.values().toArray(new MetadataFieldMapper[0]), - meta - ); + return new Mapping(rootObjectMapper, metadataMappers.values().toArray(new MetadataFieldMapper[0]), meta); } } diff --git a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java index d16c1d273207c..925bed8c7ab77 100644 --- a/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java +++ b/server/src/test/java/org/elasticsearch/index/TimeSeriesModeTests.java @@ -9,34 +9,18 @@ package org.elasticsearch.index; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperServiceTestCase; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.StringFieldScript; import org.elasticsearch.script.StringFieldScript.LeafFactory; import org.elasticsearch.search.lookup.SearchLookup; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; -import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.util.Map; -import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; public class TimeSeriesModeTests extends MapperServiceTestCase { @@ -84,231 +68,6 @@ public 
void testSortOrder() { assertThat(e.getMessage(), equalTo("[index.mode=time_series] is incompatible with [index.sort.order]")); } - public void testAddsTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - DocumentMapper mapper = createMapperService(s, mapping(b -> {})).documentMapper(); - MappedFieldType timestamp = mapper.mappers().getFieldType(DataStreamTimestampFieldMapper.DEFAULT_PATH); - assertThat(timestamp, instanceOf(DateFieldType.class)); - assertThat(((DateFieldType) timestamp).resolution(), equalTo(DateFieldMapper.Resolution.MILLISECONDS)); - - Mapper timestampField = mapper.mappers().getMapper(DataStreamTimestampFieldMapper.NAME); - assertThat(timestampField, instanceOf(DataStreamTimestampFieldMapper.class)); - assertTrue(((DataStreamTimestampFieldMapper) timestampField).isEnabled()); - } - - public void testTimestampMillis() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - DocumentMapper mapper = createMapperService(s, mapping(b -> b.startObject("@timestamp").field("type", "date").endObject())) - .documentMapper(); - MappedFieldType timestamp = mapper.mappers().getFieldType("@timestamp"); - assertThat(timestamp, instanceOf(DateFieldType.class)); - assertThat(((DateFieldType) timestamp).resolution(), equalTo(DateFieldMapper.Resolution.MILLISECONDS)); - - Mapper timestampField = mapper.mappers().getMapper(DataStreamTimestampFieldMapper.NAME); - assertThat(timestampField, instanceOf(DataStreamTimestampFieldMapper.class)); - assertTrue(((DataStreamTimestampFieldMapper) timestampField).isEnabled()); - } - - public void testTimestampNanos() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - DocumentMapper mapper = createMapperService(s, mapping(b -> b.startObject("@timestamp").field("type", "date_nanos").endObject())) - .documentMapper(); - MappedFieldType timestamp = mapper.mappers().getFieldType("@timestamp"); - assertThat(timestamp, instanceOf(DateFieldType.class)); - assertThat(((DateFieldType) timestamp).resolution(), equalTo(DateFieldMapper.Resolution.NANOSECONDS)); - - Mapper timestampField = mapper.mappers().getMapper(DataStreamTimestampFieldMapper.NAME); - assertThat(timestampField, instanceOf(DataStreamTimestampFieldMapper.class)); - assertTrue(((DataStreamTimestampFieldMapper) timestampField).isEnabled()); - } - - public void testBadTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - String type = randomFrom("keyword", "integer", "long", "double", "text"); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> createMapperService(s, mapping(b -> b.startObject("@timestamp").field("type", type).endObject())) - ); - assertThat( - e.getMessage(), - equalTo("data stream timestamp field [@timestamp] is of type [" + type + "], but [date,date_nanos] is expected") - ); - } - - public void testWithoutTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - DocumentMapper mapper = createMapperService(s, mapping(b -> 
b.startObject("@timestamp").field("type", "date").endObject())) - .documentMapper(); - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapper.parse( - new SourceToParse("1", BytesReference.bytes(XContentFactory.jsonBuilder().startObject().endObject()), XContentType.JSON) - ) - ); - assertThat(e.getRootCause().getMessage(), containsString("data stream timestamp field [@timestamp] is missing")); - } - - public void testEnableTimestampRange() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - DocumentMapper mapper = createMapperService( - s, - mapping(b -> b.startObject("@timestamp").field("type", randomBoolean() ? "date" : "date_nanos").endObject()) - ).documentMapper(); - ParsedDocument doc = mapper.parse( - new SourceToParse( - "1", - BytesReference.bytes( - XContentFactory.jsonBuilder().startObject().field("@timestamp", randomLongBetween(startTime, endTime)).endObject() - ), - XContentType.JSON - ) - ); - // Look, mah, no failure. - assertNotNull(doc.rootDoc().getNumericValue("@timestamp")); - } - - public void testBadStartTime() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - DocumentMapper mapper = createMapperService(s, mapping(b -> b.startObject("@timestamp").field("type", "date").endObject())) - .documentMapper(); - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapper.parse( - new SourceToParse( - "1", - BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .field("@timestamp", Math.max(startTime - randomLongBetween(1, 3), 0)) - .endObject() - ), - XContentType.JSON - ) - ) - ); - assertThat(e.getRootCause().getMessage(), containsString("must be larger than")); - } - - public void testBadEndTime() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - DocumentMapper mapper = createMapperService(s, mapping(b -> b.startObject("@timestamp").field("type", "date").endObject())) - .documentMapper(); - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapper.parse( - new SourceToParse( - "1", - BytesReference.bytes( - XContentFactory.jsonBuilder().startObject().field("@timestamp", endTime + randomLongBetween(0, 3)).endObject() - ), - XContentType.JSON - ) - ) - ); - assertThat(e.getRootCause().getMessage(), containsString("must be smaller than")); - } - - public void testEnabledTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - 
.put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject(DataStreamTimestampFieldMapper.NAME); - if (randomBoolean()) { - mappings.field("enabled", true); - } else { - mappings.field("enabled", "true"); - } - mappings.endObject().endObject().endObject(); - - DocumentMapper mapper = createMapperService(s, mappings).documentMapper(); - Mapper timestampField = mapper.mappers().getMapper(DataStreamTimestampFieldMapper.NAME); - assertThat(timestampField, instanceOf(DataStreamTimestampFieldMapper.class)); - assertTrue(((DataStreamTimestampFieldMapper) timestampField).isEnabled()); - } - - public void testDisabledTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject(DataStreamTimestampFieldMapper.NAME) - .field("enabled", false) - .endObject() - .endObject() - .endObject(); - - Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(s, mappings).documentMapper()); - assertThat( - e.getMessage(), - equalTo("Failed to parse mapping: time series index [_data_stream_timestamp] meta field must be enabled") - ); - } - - public void testBadTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .field(DataStreamTimestampFieldMapper.NAME, "enabled") - .endObject() - .endObject(); - - Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(s, mappings).documentMapper()); - assertThat(e.getMessage(), equalTo("Failed to parse mapping: time series index [_data_stream_timestamp] meta field format error")); - } - public void testWithoutRoutingPath() { Settings s = Settings.builder().put(IndexSettings.MODE.getKey(), "time_series").build(); Exception e = expectThrows(IllegalArgumentException.class, () -> IndexSettings.MODE.get(s)); From e9be9a10528ee5be2a89d25970c018798393aa2c Mon Sep 17 00:00:00 2001 From: Joe Gallo Date: Mon, 22 Nov 2021 09:36:33 -0500 Subject: [PATCH 02/88] Adjust ILMHistoryStore bulk size (#80902) --- .../core/action/DeleteDataStreamAction.java | 2 +- .../datastreams/DataStreamIT.java | 12 +++-------- .../xpack/ilm/history/ILMHistoryStore.java | 12 ++++++++--- .../ilm/history/ILMHistoryStoreTests.java | 20 ------------------- 4 files changed, 13 insertions(+), 33 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DeleteDataStreamAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DeleteDataStreamAction.java index 628b9c34440c2..b0c2345925743 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DeleteDataStreamAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/DeleteDataStreamAction.java @@ -45,7 +45,7 @@ public static class Request extends MasterNodeRequest implements Indice private final boolean wildcardExpressionsOriginallySpecified; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); - public Request(String[] names) { + public 
Request(String... names) { this.names = Objects.requireNonNull(names); this.wildcardExpressionsOriginallySpecified = Arrays.stream(names).anyMatch(Regex::isSimpleMatchPattern); } diff --git a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 035cf3ff4be71..be073aa840d4d 100644 --- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -40,7 +40,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -120,16 +119,11 @@ protected Collection> nodePlugins() { @After public void cleanup() { - AcknowledgedResponse response = client().execute( - DeleteDataStreamAction.INSTANCE, - new DeleteDataStreamAction.Request(new String[] { "*" }) - ).actionGet(); - assertAcked(response); + DeleteDataStreamAction.Request deleteDataStreamsRequest = new DeleteDataStreamAction.Request("*"); + assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, deleteDataStreamsRequest).actionGet()); - DeleteDataStreamAction.Request deleteDSRequest = new DeleteDataStreamAction.Request(new String[] { "*" }); - client().execute(DeleteDataStreamAction.INSTANCE, deleteDSRequest).actionGet(); DeleteComposableIndexTemplateAction.Request deleteTemplateRequest = new DeleteComposableIndexTemplateAction.Request("*"); - client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteTemplateRequest).actionGet(); + assertAcked(client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteTemplateRequest).actionGet()); } public void testBasicScenario() throws Exception { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java index 708ec7b60a1a2..58478bb1cdfca 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStore.java @@ -22,7 +22,6 @@ import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -53,6 +52,13 @@ public class ILMHistoryStore implements Closeable { public static final String ILM_HISTORY_DATA_STREAM = "ilm-history-" + INDEX_TEMPLATE_VERSION; + private static int ILM_HISTORY_BULK_SIZE = StrictMath.toIntExact( + ByteSizeValue.parseBytesSizeValue( + System.getProperty("es.indices.lifecycle.history.bulk.size", "50MB"), + "es.indices.lifecycle.history.bulk.size" + ).getBytes() + ); + private final boolean ilmHistoryEnabled; private final BulkProcessor processor; private final ThreadPool threadPool; @@ -123,8 +129,8 @@ public void afterBulk(long executionId, BulkRequest request, 
Throwable failure) logger.error(new ParameterizedMessage("failed to index {} items into ILM history index", items), failure); } }, "ilm-history-store") - .setBulkActions(100) - .setBulkSize(new ByteSizeValue(5, ByteSizeUnit.MB)) + .setBulkActions(-1) + .setBulkSize(ByteSizeValue.ofBytes(ILM_HISTORY_BULK_SIZE)) .setFlushInterval(TimeValue.timeValueSeconds(5)) .setConcurrentRequests(1) .setBackoffPolicy(BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(1000), 3)) diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index 3b5e67efaff7b..0dbca6b5f2e2c 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriFunction; @@ -33,16 +32,12 @@ import org.elasticsearch.test.client.NoOpClient; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.ilm.LifecycleExecutionState; -import org.elasticsearch.xpack.core.template.IndexTemplateConfig; import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; -import java.io.IOException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -82,20 +77,6 @@ public void setup() { historyStore = new ILMHistoryStore(Settings.EMPTY, client, clusterService, threadPool); } - private ComposableIndexTemplate parseIndexTemplate(IndexTemplateConfig c) { - try { - return ComposableIndexTemplate.parse( - JsonXContent.jsonXContent.createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - c.loadBytes() - ) - ); - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - @After public void setdown() { historyStore.close(); @@ -122,7 +103,6 @@ public void testNoActionIfDisabled() throws Exception { } } - @SuppressWarnings("unchecked") public void testPut() throws Exception { String policyId = randomAlphaOfLength(5); final long timestamp = randomNonNegativeLong(); From 6b2685f4d1ae9e01746bab2be317c8e2be9de755 Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Mon, 22 Nov 2021 16:41:47 +0100 Subject: [PATCH 03/88] make *.routing.allocation.* list based setting (#80420) --- .../allocation/FilteringAllocationIT.java | 3 +- .../cluster/metadata/IndexMetadata.java | 35 ++++++++-------- .../cluster/node/DiscoveryNodeFilters.java | 17 ++++---- .../decider/FilterAllocationDecider.java | 28 +++++++------ .../common/settings/Setting.java | 8 ++++ .../node/DiscoveryNodeFiltersTests.java | 4 +- .../decider/FilterAllocationDeciderTests.java | 40 +++++++++++++++++-- .../common/settings/ScopedSettingsTests.java | 4 +- .../AllocationFilteringIntegTests.java | 4 +- 9 files changed, 92 insertions(+), 51 deletions(-) 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index c8f65d6397d64..54db6abab91dd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESIntegTestCase; @@ -241,7 +240,7 @@ public void testDisablingAllocationFiltering() { public void testInvalidIPFilterClusterSettings() { String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip"); - Setting<String> filterSetting = randomFrom( + var filterSetting = randomFrom( FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING ) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index d13e93ed4866b..853ff0bf5c0de 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -70,9 +70,9 @@ import java.util.function.Function; import static org.elasticsearch.cluster.metadata.Metadata.CONTEXT_MODE_PARAM; -import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; +import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.validateIpValue; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; @@ -351,21 +351,22 @@ public static APIBlock readFrom(StreamInput input) throws IOException { public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require"; public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include"; public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude"; - public static final Setting.AffixSetting<String> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.prefixKeySetting( + + public static final Setting.AffixSetting<List<String>> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.prefixKeySetting( INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", - key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope) + key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.IndexScope) ); - public static final Setting.AffixSetting<String> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.prefixKeySetting( + public static final Setting.AffixSetting<List<String>> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.prefixKeySetting( INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", - key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), 
Property.Dynamic, Property.IndexScope) + key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.IndexScope) ); - public static final Setting.AffixSetting<String> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.prefixKeySetting( + public static final Setting.AffixSetting<List<String>> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.prefixKeySetting( INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", - key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope) + key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.IndexScope) ); - public static final Setting.AffixSetting<String> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING = Setting.prefixKeySetting( + public static final Setting.AffixSetting<List<String>> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING = Setting.prefixKeySetting( "index.routing.allocation.initial_recovery.", - key -> Setting.simpleString(key) + key -> Setting.stringListSetting(key) ); /** @@ -1395,33 +1396,33 @@ public IndexMetadata build() { filledInSyncAllocationIds.put(i, Collections.emptySet()); } } - final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings); + var requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings); final DiscoveryNodeFilters requireFilters; if (requireMap.isEmpty()) { requireFilters = null; } else { - requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); + requireFilters = DiscoveryNodeFilters.buildFromKeyValues(AND, requireMap); } - Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings); + var includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings); final DiscoveryNodeFilters includeFilters; if (includeMap.isEmpty()) { includeFilters = null; } else { - includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); + includeFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, includeMap); } - Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings); + var excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings); final DiscoveryNodeFilters excludeFilters; if (excludeMap.isEmpty()) { excludeFilters = null; } else { - excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); + excludeFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, excludeMap); } - Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getAsMap(settings); + var initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getAsMap(settings); final DiscoveryNodeFilters initialRecoveryFilters; if (initialRecoveryMap.isEmpty()) { initialRecoveryFilters = null; } else { - initialRecoveryFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, initialRecoveryMap); + initialRecoveryFilters = DiscoveryNodeFilters.buildFromKeyValues(OR, initialRecoveryMap); } Version indexCreatedVersion = indexCreatedVersion(settings); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java index e1723728d2230..37c534fc553fc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodeFilters.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.node; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.regex.Regex; @@ 
-17,9 +16,9 @@ import org.elasticsearch.core.Nullable; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; -import java.util.function.BiConsumer; public class DiscoveryNodeFilters { @@ -35,22 +34,22 @@ public enum OpType { * "_ip", "_host_ip", and "_publish_ip" and ensuring each of their comma separated values * that has no wildcards is a valid IP address. */ - public static final BiConsumer<String, String> IP_VALIDATOR = (propertyKey, rawValue) -> { - if (rawValue != null) { + public static void validateIpValue(String propertyKey, List<String> values) { + if (values != null) { if (propertyKey.endsWith("._ip") || propertyKey.endsWith("._host_ip") || propertyKey.endsWith("_publish_ip")) { - for (String value : Strings.tokenizeToStringArray(rawValue, ",")) { + for (String value : values) { if (Regex.isSimpleMatchPattern(value) == false && InetAddresses.isInetAddress(value) == false) { throw new IllegalArgumentException("invalid IP address [" + value + "] for [" + propertyKey + "]"); } } } } - }; + } - public static DiscoveryNodeFilters buildFromKeyValue(OpType opType, Map<String, String> filters) { + public static DiscoveryNodeFilters buildFromKeyValues(OpType opType, Map<String, List<String>> filters) { Map<String, String[]> bFilters = new HashMap<>(); - for (Map.Entry<String, String> entry : filters.entrySet()) { - String[] values = Strings.tokenizeToStringArray(entry.getValue(), ","); + for (var entry : filters.entrySet()) { + String[] values = entry.getValue().toArray(String[]::new); if (values.length > 0 && entry.getKey() != null) { bFilters.put(entry.getKey(), values); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index 600e3044a2577..4da3f6a925949 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -20,11 +20,12 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import java.util.List; import java.util.Map; -import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; +import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.validateIpValue; /** * This {@link AllocationDecider} control shard allocation by include and @@ -60,17 +61,18 @@ public class FilterAllocationDecider extends AllocationDecider { private static final String CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX = "cluster.routing.allocation.require"; private static final String CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX = "cluster.routing.allocation.include"; private static final String CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX = "cluster.routing.allocation.exclude"; - public static final Setting.AffixSetting<String> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.prefixKeySetting( + + public static final Setting.AffixSetting<List<String>> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.prefixKeySetting( CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", - key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope) + key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.NodeScope) ); - public static final Setting.AffixSetting<String> 
+    public static final Setting.AffixSetting<List<String>> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.prefixKeySetting(
         CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".",
-        key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope)
+        key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.NodeScope)
     );
-    public static final Setting.AffixSetting<String> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.prefixKeySetting(
+    public static final Setting.AffixSetting<List<String>> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.prefixKeySetting(
         CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".",
-        key -> Setting.simpleString(key, value -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope)
+        key -> Setting.stringListSetting(key, value -> validateIpValue(key, value), Property.Dynamic, Property.NodeScope)
     );
 
     private volatile DiscoveryNodeFilters clusterRequireFilters;
@@ -224,15 +226,15 @@ private Decision shouldClusterFilter(DiscoveryNode node, RoutingAllocation alloc
         return null;
     }
 
-    private void setClusterRequireFilters(Map<String, String> filters) {
-        clusterRequireFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValue(AND, filters));
+    private void setClusterRequireFilters(Map<String, List<String>> filters) {
+        clusterRequireFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValues(AND, filters));
     }
 
-    private void setClusterIncludeFilters(Map<String, String> filters) {
-        clusterIncludeFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValue(OR, filters));
+    private void setClusterIncludeFilters(Map<String, List<String>> filters) {
+        clusterIncludeFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValues(OR, filters));
     }
 
-    private void setClusterExcludeFilters(Map<String, String> filters) {
-        clusterExcludeFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValue(OR, filters));
+    private void setClusterExcludeFilters(Map<String, List<String>> filters) {
+        clusterExcludeFilters = DiscoveryNodeFilters.trimTier(DiscoveryNodeFilters.buildFromKeyValues(OR, filters));
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
index d016211b46b3a..45298272f97c1 100644
--- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java
+++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java
@@ -1637,6 +1637,14 @@ public static Setting<ByteSizeValue> memorySizeSetting(String key, String defaul
         return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
     }
 
+    public static Setting<List<String>> stringListSetting(String key, Property... properties) {
+        return listSetting(key, List.of(), Function.identity(), v -> {}, properties);
+    }
+
+    public static Setting<List<String>> stringListSetting(String key, Validator<List<String>> validator, Property... properties) {
+        return listSetting(key, List.of(), Function.identity(), validator, properties);
+    }
+
     public static <T> Setting<List<T>> listSetting(
         final String key,
         final List<String> defaultStringValue,
diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
index af7e85a5bc0a2..06a0a8eb95fc5 100644
--- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java
@@ -318,7 +318,7 @@ private Settings shuffleSettings(Settings source) {
     }
 
     public static DiscoveryNodeFilters buildFromSettings(DiscoveryNodeFilters.OpType opType, String prefix, Settings settings) {
-        Setting.AffixSetting<String> setting = Setting.prefixKeySetting(prefix, key -> Setting.simpleString(key));
-        return DiscoveryNodeFilters.buildFromKeyValue(opType, setting.getAsMap(settings));
+        var values = Setting.prefixKeySetting(prefix, key -> Setting.stringListSetting(key)).getAsMap(settings);
+        return DiscoveryNodeFilters.buildFromKeyValues(opType, values);
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
index e6754d2b94aff..81acdbc8d2168 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDeciderTests.java
@@ -24,13 +24,13 @@
 import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.IndexScopedSettings;
-import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.EmptySnapshotsInfoService;
 import org.elasticsearch.test.gateway.TestGatewayAllocator;
 
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 
 import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_RESIZE_SOURCE_NAME;
 import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_RESIZE_SOURCE_UUID;
@@ -190,7 +190,7 @@ private ClusterState createInitialClusterState(AllocationService service, Settin
 
     public void testInvalidIPFilter() {
         String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
-        Setting<String> filterSetting = randomFrom(
+        var filterSetting = randomFrom(
             IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING
@@ -209,7 +209,7 @@ public void testInvalidIPFilter() {
     }
 
     public void testNull() {
-        Setting<String> filterSetting = randomFrom(
+        var filterSetting = randomFrom(
            IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING
@@ -224,7 +224,7 @@ public void testNull() {
 
     public void testWildcardIPFilter() {
         String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
-        Setting<String> filterSetting = randomFrom(
+        var filterSetting = randomFrom(
             IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
             IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING
@@ -238,4 +238,36 @@ public void testWildcardIPFilter() {
             "test ip validation"
         );
     }
+
+    public void testSettingsAcceptCommaSeparatedValues() {
+        String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
+        var filterSetting = randomFrom(
+            IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
+            IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
+            IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING
+        );
+
+        new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS).updateDynamicSettings(
+            Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.0.10,192.168.0.11").build(),
+            Settings.builder().put(Settings.EMPTY),
+            Settings.builder(),
+            "test ip validation"
+        );
+    }
+
+    public void testSettingsAcceptArrayOfValues() {
+        String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
+        var filterSetting = randomFrom(
+            IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
+            IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING,
+            IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING
+        );
+
+        new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS).updateDynamicSettings(
+            Settings.builder().putList(filterSetting.getKey() + ipKey, List.of("192.168.0.10", "192.168.0.11")).build(),
+            Settings.builder().put(Settings.EMPTY),
+            Settings.builder(),
+            "test ip validation"
+        );
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
index 6fc0d69bc0b16..178836f1b9327 100644
--- a/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java
@@ -114,9 +114,9 @@ public void testResetSettingWithIPValidator() {
             currentSettings,
             new HashSet<>(Arrays.asList(dynamicSetting, IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING))
         );
-        Map<String, String> s = IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(currentSettings);
+        var s = IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(currentSettings);
         assertEquals(1, s.size());
-        assertEquals("192.168.0.1,127.0.0.1", s.get("_ip"));
+        assertEquals(List.of("192.168.0.1", "127.0.0.1"), s.get("_ip"));
         Settings.Builder builder = Settings.builder();
         Settings updates = Settings.builder().putNull("index.routing.allocation.require._ip").put("index.some.dyn.setting", 1).build();
         settings.validate(updates, false);
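Taken together, these changes replace comma-separated string settings with real list settings. A minimal sketch of how the new pieces fit together (illustrative only; the setting name and IP values are example inputs, not part of the patch):

[source,java]
--------------------------------------------------
import org.elasticsearch.cluster.node.DiscoveryNodeFilters;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

import java.util.List;
import java.util.Map;

public class StringListSettingSketch {
    public static void main(String[] args) {
        // An affix setting whose leaf values are lists of strings,
        // validated as IP addresses where the key suffix requires it.
        Setting.AffixSetting<List<String>> includeGroup = Setting.prefixKeySetting(
            "index.routing.allocation.include.",
            key -> Setting.stringListSetting(
                key,
                value -> DiscoveryNodeFilters.validateIpValue(key, value),
                Property.Dynamic,
                Property.IndexScope
            )
        );

        // Both the list form used here and the legacy comma-separated
        // string form parse to the same List<String>.
        Settings settings = Settings.builder()
            .putList("index.routing.allocation.include._ip", "192.168.0.10", "192.168.0.11")
            .build();

        // getAsMap now yields Map<String, List<String>> rather than Map<String, String>.
        Map<String, List<String>> filters = includeGroup.getAsMap(settings);
        DiscoveryNodeFilters nodeFilters = DiscoveryNodeFilters.buildFromKeyValues(DiscoveryNodeFilters.OpType.OR, filters);
    }
}
--------------------------------------------------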
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/AllocationFilteringIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/AllocationFilteringIntegTests.java
index 27db67b82adaa..16cb43d8e944a 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/AllocationFilteringIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/AllocationFilteringIntegTests.java
@@ -111,9 +111,9 @@ public void testReplacesIncludeFilterWithExcludeFilter() throws InterruptedExcep
      * @param mountSettingIsPositive whether {@code mountSetting} is positive (i.e. include/require) or negative (i.e. exclude)
      */
     private void runTest(
-        Setting.AffixSetting<String> indexSetting,
+        Setting.AffixSetting<List<String>> indexSetting,
         boolean indexSettingIsPositive,
-        @Nullable Setting.AffixSetting<String> mountSetting,
+        @Nullable Setting.AffixSetting<List<String>> mountSetting,
         boolean mountSettingIsPositive
     ) throws InterruptedException {
         final List<String> nodes = internalCluster().startNodes(2);

From 94f760178298f0c0b190bc9986d533674328fbfb Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 22 Nov 2021 15:53:15 +0000
Subject: [PATCH 04/88] Hide TransportChannel#logger (#80868)

In #57573 we introduced a `public static Logger logger` field in
`TransportChannel` which some IDEs tend to automatically import if you
mention an as-yet-undeclared `logger` field. This commit reverts that
small part of #57573 so that we go back to using a private logger
belonging to `ChannelActionListener` instead.
---
 .../action/support/ChannelActionListener.java | 15 +++++++++++++-
 .../transport/TransportChannel.java           | 20 -------------------
 2 files changed, 14 insertions(+), 21 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java
index 6e04e65f04434..8edc7e591bbe6 100644
--- a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java
+++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java
@@ -8,6 +8,9 @@
 
 package org.elasticsearch.action.support;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.transport.TransportChannel;
 import org.elasticsearch.transport.TransportRequest;
@@ -17,6 +20,8 @@ public final class ChannelActionListener {
 
+    private static final Logger logger = LogManager.getLogger();
+
     private final TransportChannel channel;
     private final Request request;
     private final String actionName;
@@ -38,7 +43,15 @@ public void onResponse(Response response) {
 
     @Override
     public void onFailure(Exception e) {
-        TransportChannel.sendErrorResponse(channel, actionName, request, e);
+        try {
+            channel.sendResponse(e);
+        } catch (Exception sendException) {
+            sendException.addSuppressed(e);
+            logger.warn(
+                () -> new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request),
+                sendException
+            );
+        }
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/TransportChannel.java b/server/src/main/java/org/elasticsearch/transport/TransportChannel.java
index d7a6d1d50d87d..185b9ce6b88a8 100644
--- a/server/src/main/java/org/elasticsearch/transport/TransportChannel.java
+++ b/server/src/main/java/org/elasticsearch/transport/TransportChannel.java
@@ -8,9 +8,6 @@
 
 package org.elasticsearch.transport;
 
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.Version;
 
 import java.io.IOException;
@@ -20,8 +17,6 @@
  */
 public interface TransportChannel {
 
-    Logger logger = LogManager.getLogger(TransportChannel.class);
-
     String getProfileName();
 
     String getChannelType();
 
@@ -36,19 +31,4 @@ public interface TransportChannel {
     default Version getVersion() {
         return Version.CURRENT;
     }
-
-    /**
-     * A helper method to send an exception and handle and log a subsequent exception
-     */
-    static void sendErrorResponse(TransportChannel channel, String
actionName, TransportRequest request, Exception e) { - try { - channel.sendResponse(e); - } catch (Exception sendException) { - sendException.addSuppressed(e); - logger.warn( - () -> new ParameterizedMessage("Failed to send error response for action [{}] and request [{}]", actionName, request), - sendException - ); - } - } } From 3476f68d373f9833ed3f15dfae45ed648867bd77 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 22 Nov 2021 11:52:28 -0500 Subject: [PATCH 05/88] Skip test in mixed cluster It has a different error message from 8.0.0 --- .../resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml index 7ac5bc4be4d7b..44d0d42b804bf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -160,7 +160,7 @@ reject @timestamp with wrong type: --- reject timestamp meta field with wrong type: - skip: - version: " - 7.99.99" + version: " - 8.0.99" reason: introduced in 8.0.0 to be backported to 7.16.0 - do: From f3a69ae4b19817cda0dc53991e029a32d1f98f40 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 22 Nov 2021 09:25:01 -0800 Subject: [PATCH 06/88] [DOCS] Adds missing query parameters to ML APIs (#80863) --- docs/reference/cat/anomaly-detectors.asciidoc | 2 +- docs/reference/cat/datafeeds.asciidoc | 2 +- .../anomaly-detection/apis/close-job.asciidoc | 15 ++++-- .../apis/get-calendar-event.asciidoc | 26 +++------- .../apis/get-calendar.asciidoc | 10 ++-- .../apis/get-category.asciidoc | 6 +-- .../apis/get-datafeed-stats.asciidoc | 15 ++---- .../apis/get-datafeed.asciidoc | 15 ++---- .../apis/get-influencer.asciidoc | 2 +- .../apis/get-job-stats.asciidoc | 14 +++--- .../anomaly-detection/apis/get-job.asciidoc | 14 +++--- .../apis/get-overall-buckets.asciidoc | 18 ++++--- .../apis/get-record.asciidoc | 27 +++++++--- .../apis/get-snapshot.asciidoc | 2 +- .../anomaly-detection/apis/open-job.asciidoc | 11 ++-- .../apis/put-datafeed.asciidoc | 21 ++++++++ .../apis/revert-snapshot.asciidoc | 50 +++++++++++-------- .../apis/stop-datafeed.asciidoc | 2 +- .../apis/upgrade-job-model-snapshot.asciidoc | 13 +++-- docs/reference/ml/ml-shared.asciidoc | 8 +-- 20 files changed, 157 insertions(+), 116 deletions(-) diff --git a/docs/reference/cat/anomaly-detectors.asciidoc b/docs/reference/cat/anomaly-detectors.asciidoc index 9c19d7e786121..7fd3ce63b50e8 100644 --- a/docs/reference/cat/anomaly-detectors.asciidoc +++ b/docs/reference/cat/anomaly-detectors.asciidoc @@ -42,7 +42,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-jobs] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=bytes] diff --git a/docs/reference/cat/datafeeds.asciidoc b/docs/reference/cat/datafeeds.asciidoc index 652a95a46df30..82bb4a844cdd9 100644 --- a/docs/reference/cat/datafeeds.asciidoc +++ b/docs/reference/cat/datafeeds.asciidoc @@ -42,7 +42,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id] `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-datafeeds] 
+include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-datafeeds] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=http-format] diff --git a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc index db25369c582d2..8d376a80791b2 100644 --- a/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/close-job.asciidoc @@ -30,10 +30,6 @@ A job can be opened and closed multiple times throughout its lifecycle. A closed job cannot receive data or perform analysis operations, but you can still explore and navigate results. -You can close multiple {anomaly-jobs} in a single API request by using a group -name, a comma-separated list of jobs, or a wildcard expression. You can close -all jobs by using `_all` or by specifying `*` as the ``. - If you close an {anomaly-job} whose {dfeed} is running, the request first tries to stop the {dfeed}. This behavior is equivalent to calling <> with the same `timeout` and `force` parameters @@ -64,13 +60,16 @@ results the job might have recently produced or might produce in the future. ``:: (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection-wildcard] ++ +You can close all jobs by using `_all` or by specifying `*` as the job +identifier. [[ml-close-job-query-parms]] == {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-jobs] `force`:: (Optional, Boolean) Use to close a failed job, or to forcefully close a job @@ -80,6 +79,12 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] (Optional, <>) Controls the time to wait until a job has closed. The default value is 30 minutes. +[[ml-close-job-request-body]] +== {api-request-body-title} + +You can also specify the query parameters (such as `allow_no_match` and `force`) +in the request body. + [[ml-close-job-response-codes]] == {api-response-codes-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index 4bc9921d90c43..0431aa8aa4e05 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -23,11 +23,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-calendar-event-desc]] == {api-description-title} -You can get scheduled event information for multiple calendars in a single -API request by using a comma-separated list of ids or a wildcard expression. -You can get scheduled event information for all calendars by using `_all` or `*` -as the ``. - For more information, see {ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. @@ -37,6 +32,11 @@ For more information, see ``:: (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=calendar-id] ++ +You can get scheduled event information for multiple calendars in a single +API request by using a comma-separated list of ids or a wildcard expression. +You can get scheduled event information for all calendars by using `_all` or `*` +as the calendar identifier. 
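For example, the following request retrieves scheduled events for a single calendar (a sketch; `planned-outages` is an assumed calendar name):

[source,console]
--------------------------------------------------
GET _ml/calendars/planned-outages/events?size=20
--------------------------------------------------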
[[ml-get-calendar-event-query-parms]] == {api-query-parms-title} @@ -63,15 +63,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=calendar-id] [[ml-get-calendar-event-request-body]] == {api-request-body-title} -`end`:: - (Optional, string) Specifies to get events with timestamps earlier than this - time. Defaults to unset, which means results are not - limited to specific timestamps. - -`job_id`:: - (Optional, string) Specifies to get events for a specific {anomaly-job} - identifier or job group. It must be used with a calendar identifier of `_all` - or `*`. +You can also specify some of the query parameters (such as `end` and +`job_id`) in the request body. `page.from`:: (Optional, integer) Skips the specified number of events. Defaults to `0`. @@ -80,11 +73,6 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=calendar-id] (Optional, integer) Specifies the maximum number of events to obtain. Defaults to `100`. -`start`:: - (Optional, string) Specifies to get events with timestamps after this time. - Defaults to unset, which means results are not limited to specific - timestamps. - [[ml-get-calendar-event-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index 1e2075d059b96..331aa10286db7 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -23,11 +23,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-calendar-desc]] == {api-description-title} -You can get information for multiple calendars in a single API request by using a -comma-separated list of ids or a wildcard expression. You can get -information for all calendars by using `_all`, by specifying `*` as the -``, or by omitting the ``. - For more information, see {ml-docs}/ml-ad-finding-anomalies.html#ml-ad-calendars[Calendars and scheduled events]. @@ -37,6 +32,11 @@ For more information, see ``:: (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=calendar-id] ++ +You can get information for multiple calendars in a single API request by using +a comma-separated list of ids or a wildcard expression. You can get information +for all calendars by using `_all`, by specifying `*` as the calendar identifier, +or by omitting the identifier. [[ml-get-calendar-query-parms]] == {api-query-parms-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index af8ebd18c94b7..37a769b838d6a 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -65,6 +65,9 @@ Defaults to `100`. [[ml-get-category-request-body]] == {api-request-body-title} +You can also specify the `partition_field_value` query parameter in the +request body. + `page`.`from`:: (Optional, integer) Skips the specified number of categories. Defaults to `0`. @@ -72,9 +75,6 @@ Defaults to `100`. (Optional, integer) Specifies the maximum number of categories to obtain. Defaults to `100`. -`partition_field_value`:: -(Optional, string) Only return categories for the specified partition. 
- [[ml-get-category-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc index 6daa789014750..809019f11a619 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed-stats.asciidoc @@ -29,11 +29,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-datafeed-stats-desc]] == {api-description-title} -You can get statistics for multiple {dfeeds} in a single API request by using a -comma-separated list of {dfeeds} or a wildcard expression. You can get -statistics for all {dfeeds} by using `_all`, by specifying `*` as the -``, or by omitting the ``. - If the {dfeed} is stopped, the only information you receive is the `datafeed_id` and the `state`. @@ -46,17 +41,17 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id-wildcard] + --- -If you do not specify one of these options, the API returns information about -all {dfeeds}. --- +You can get statistics for multiple {dfeeds} in a single API request by using a +comma-separated list of {dfeeds} or a wildcard expression. You can get +statistics for all {dfeeds} by using `_all`, by specifying `*` as the {dfeed} +identifier, or by omitting the identifier. [[ml-get-datafeed-stats-query-parms]] == {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-datafeeds] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-datafeeds] [role="child_attributes"] [[ml-get-datafeed-stats-results]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc index d5b4762e3c637..74a0f55de09ec 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-datafeed.asciidoc @@ -29,11 +29,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-datafeed-desc]] == {api-description-title} -You can get information for multiple {dfeeds} in a single API request by using a -comma-separated list of {dfeeds} or a wildcard expression. You can get -information for all {dfeeds} by using `_all`, by specifying `*` as the -``, or by omitting the ``. - IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. [[ml-get-datafeed-path-parms]] @@ -43,17 +38,17 @@ IMPORTANT: This API returns a maximum of 10,000 {dfeeds}. (Optional, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id-wildcard] + --- -If you do not specify one of these options, the API returns information about -all {dfeeds}. --- +You can get information for multiple {dfeeds} in a single API request by using a +comma-separated list of {dfeeds} or a wildcard expression. You can get +information for all {dfeeds} by using `_all`, by specifying `*` as the +{dfeed} identifier, or by omitting the identifier. 
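For example, the following request returns every {dfeed} whose identifier starts with `datafeed-`, without failing if nothing matches (a sketch; the {dfeed} name pattern is an example):

[source,console]
--------------------------------------------------
GET _ml/datafeeds/datafeed-*?allow_no_match=true
--------------------------------------------------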
[[ml-get-datafeed-query-parms]] == {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-datafeeds] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-datafeeds] `exclude_generated`:: (Optional, Boolean) diff --git a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index 4c508ded0a144..1281a27073e58 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -74,7 +74,7 @@ timestamps. [[ml-get-influencer-request-body]] == {api-request-body-title} -You can also specify the query parameters (such as `desc` and +You can also specify some of the query parameters (such as `desc` and `end`) in the request body. `page`.`from`:: diff --git a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc index 0504cd0c4a3f1..4c1fbfe2da3de 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job-stats.asciidoc @@ -27,11 +27,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-job-stats-desc]] == {api-description-title} -You can get statistics for multiple {anomaly-jobs} in a single API request by -using a group name, a comma-separated list of jobs, or a wildcard expression. -You can get statistics for all {anomaly-jobs} by using `_all`, by specifying `*` -as the ``, or by omitting the ``. - IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-stats-path-parms]] @@ -39,14 +34,19 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. ``:: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection-default] +Identifier for the anomaly detection job. It can be a job identifier, a group +name, or a wildcard expression. You can get statistics for multiple +{anomaly-jobs} in a single API request by using a group name, a comma-separated +list of jobs, or a wildcard expression. You can get statistics for all +{anomaly-jobs} by using `_all`, by specifying `*` as the job identifier, or by +omitting the identifier. [[ml-get-job-stats-query-parms]] == {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-jobs] [role="child_attributes"] [[ml-get-job-stats-results]] diff --git a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc index 8322608343a87..03b1179d8c375 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-job.asciidoc @@ -27,11 +27,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-job-desc]] == {api-description-title} -You can get information for multiple {anomaly-jobs} in a single API request by -using a group name, a comma-separated list of jobs, or a wildcard expression. -You can get information for all {anomaly-jobs} by using `_all`, by specifying -`*` as the ``, or by omitting the ``. - IMPORTANT: This API returns a maximum of 10,000 jobs. [[ml-get-job-path-parms]] @@ -39,14 +34,19 @@ IMPORTANT: This API returns a maximum of 10,000 jobs. 
``:: (Optional, string) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection-default] +Identifier for the anomaly detection job. It can be a job identifier, a group +name, or a wildcard expression. You can get information for multiple +{anomaly-jobs} in a single API request by using a group name, a comma-separated +list of jobs, or a wildcard expression. You can get information for all +{anomaly-jobs} by using `_all`, by specifying `*` as the job identifier, or by +omitting the identifier. [[ml-get-job-query-parms]] == {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-jobs] `exclude_generated`:: (Optional, Boolean) diff --git a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc index c18a4d6473e8e..cf8546d78fe62 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-overall-buckets.asciidoc @@ -26,9 +26,6 @@ Requires the `monitor_ml` cluster privilege. This privilege is included in the [[ml-get-overall-buckets-desc]] == {api-description-title} -You can summarize the bucket results for all {anomaly-jobs} by using `_all` or -by specifying `*` as the ``. - By default, an overall bucket has a span equal to the largest bucket span of the specified {anomaly-jobs}. To override that behavior, use the optional `bucket_span` parameter. To learn more about the concept of buckets, see @@ -53,13 +50,16 @@ a span equal to the jobs' largest bucket span. ``:: (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection-wildcard-list] ++ +You can summarize the bucket results for all {anomaly-jobs} by using `_all` or +by specifying `*` as the job identifier. -[[ml-get-overall-buckets-request-body]] -== {api-request-body-title} +[[ml-get-overall-buckets-query-parms]] +== {api-query-parms-title} `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-jobs] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-jobs] `bucket_span`:: (Optional, string) The span of the overall buckets. Must be greater or equal to @@ -90,6 +90,12 @@ specific timestamps. (Optional, integer) The number of top {anomaly-job} bucket scores to be used in the `overall_score` calculation. Defaults to `1`. +[[ml-get-overall-buckets-request-body]] +== {api-request-body-title} + +You can also specify the query parameters (such as `allow_no_match` and +`bucket_span`) in the request body. + [[ml-get-overall-buckets-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index 693b11d8210d3..bcbee61da8a69 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -41,8 +41,8 @@ of detectors. (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] -[[ml-get-record-request-body]] -== {api-request-body-title} +[[ml-get-record-query-parms]] +== {api-query-parms-title} `desc`:: (Optional, Boolean) @@ -57,17 +57,17 @@ specific timestamps. 
(Optional, Boolean) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=exclude-interim-results] -`page`.`from`:: +`from`:: (Optional, integer) Skips the specified number of records. Defaults to `0`. -`page`.`size`:: -(Optional, integer) Specifies the maximum number of records to obtain. Defaults -to `100`. - `record_score`:: (Optional, double) Returns records with anomaly scores greater or equal than this value. Defaults to `0.0`. +`size`:: +(Optional, integer) Specifies the maximum number of records to obtain. Defaults +to `100`. + `sort`:: (Optional, string) Specifies the sort field for the requested records. By default, the records are sorted by the `record_score` value. @@ -76,6 +76,19 @@ default, the records are sorted by the `record_score` value. (Optional, string) Returns records with timestamps after this time. Defaults to `-1`, which means it is unset and results are not limited to specific timestamps. +[[ml-get-record-request-body]] +== {api-request-body-title} + +You can also specify some of the query parameters (such as `desc` and +`end`) in the request body. + +`page`.`from`:: +(Optional, integer) Skips the specified number of records. Defaults to `0`. + +`page`.`size`:: +(Optional, integer) Specifies the maximum number of records to obtain. Defaults +to `100`. + [[ml-get-record-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index 94c3ed5628126..331cc51680feb 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -67,7 +67,7 @@ specifying `*` as the snapshot ID, or by omitting the snapshot ID. [[ml-get-snapshot-request-body]] == {api-request-body-title} -You can also specify the query parameters (such as `desc` and +You can also specify some of the query parameters (such as `desc` and `end`) in the request body. `page`.`from`:: diff --git a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc index d8bdf4a463442..92d8908baab41 100644 --- a/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/open-job.asciidoc @@ -38,12 +38,17 @@ data is received. (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] +[[ml-open-job-query-parms]] +== {api-query-parms-title} + +`timeout`:: +(Optional, time) Controls the time to wait until a job has opened. The default +value is 30 minutes. + [[ml-open-job-request-body]] == {api-request-body-title} -`timeout`:: - (Optional, time) Controls the time to wait until a job has opened. The default - value is 30 minutes. +You can also specify the `timeout` query parameter in the request body. [[ml-open-job-response-body]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index 08e53d163e5b3..aec91ee58f66b 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -53,6 +53,27 @@ credentials are used instead. 
(Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=datafeed-id] +[[ml-put-datafeed-query-params]] +== {api-query-parms-title} + +`allow_no_indices`:: +(Optional, Boolean) If `true`, wildcard indices expressions that resolve into no +concrete indices are ignored. This includes the `_all` string or when no indices +are specified. Defaults to `true`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] ++ +deprecated:[7.16.0] + +`ignore_unavailable`:: +(Optional, Boolean) If `true`, unavailable indices (missing or closed) are +ignored. Defaults to `false`. + + [role="child_attributes"] [[ml-put-datafeed-request-body]] == {api-request-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc index 8dd1070d0ff96..4b3a61133dfb3 100644 --- a/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/revert-snapshot.asciidoc @@ -51,26 +51,32 @@ means the {anomaly-job} starts learning a new model from scratch when it is started. -- - -[[ml-revert-snapshot-request-body]] -== {api-request-body-title} +[[ml-revert-snapshot-query-parms]] +== {api-query-parms-title} `delete_intervening_results`:: - (Optional, Boolean) If true, deletes the results in the time period between - the latest results and the time of the reverted snapshot. It also resets the - model to accept records for this time period. The default value is false. - +(Optional, Boolean) If true, deletes the results in the time period between +the latest results and the time of the reverted snapshot. It also resets the +model to accept records for this time period. The default value is false. ++ +-- NOTE: If you choose not to delete intervening results when reverting a snapshot, the job will not accept input data that is older than the current time. If you want to resend data, then delete the intervening results. +-- + +[[ml-revert-snapshot-request-body]] +== {api-request-body-title} +You can also specify the `delete_intervening_results` query parameter in the +request body. 
[[ml-revert-snapshot-example]] == {api-examples-title} [source,console] -------------------------------------------------- -POST _ml/anomaly_detectors/high_sum_total_sales/model_snapshots/1575402237/_revert +POST _ml/anomaly_detectors/low_request_rate/model_snapshots/1637092688/_revert { "delete_intervening_results": true } @@ -83,23 +89,25 @@ When the operation is complete, you receive the following results: ---- { "model" : { - "job_id" : "high_sum_total_sales", - "min_version" : "6.4.0", - "timestamp" : 1575402237000, - "description" : "State persisted due to job close at 2019-12-03T19:43:57+0000", - "snapshot_id" : "1575402237", + "job_id" : "low_request_rate", + "min_version" : "7.11.0", + "timestamp" : 1637092688000, + "description" : "State persisted due to job close at 2021-11-16T19:58:08+0000", + "snapshot_id" : "1637092688", "snapshot_doc_count" : 1, "model_size_stats" : { - "job_id" : "high_sum_total_sales", + "job_id" : "low_request_rate", "result_type" : "model_size_stats", - "model_bytes" : 1638816, + "model_bytes" : 45200, + "peak_model_bytes" : 101552, "model_bytes_exceeded" : 0, - "model_bytes_memory_limit" : 10485760, + "model_bytes_memory_limit" : 11534336, "total_by_field_count" : 3, - "total_over_field_count" : 3320, + "total_over_field_count" : 0, "total_partition_field_count" : 2, "bucket_allocation_failures_count" : 0, "memory_status" : "ok", + "assignment_memory_basis" : "current_model_bytes", "categorized_doc_count" : 0, "total_category_count" : 0, "frequent_category_count" : 0, @@ -107,11 +115,11 @@ When the operation is complete, you receive the following results: "dead_category_count" : 0, "failed_category_count" : 0, "categorization_status" : "ok", - "log_time" : 1575402237000, - "timestamp" : 1576965600000 + "log_time" : 1637092688530, + "timestamp" : 1641495600000 }, - "latest_record_time_stamp" : 1576971072000, - "latest_result_time_stamp" : 1576965600000, + "latest_record_time_stamp" : 1641502169000, + "latest_result_time_stamp" : 1641495600000, "retain" : false } } diff --git a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc index 32c03bc2ae5e6..eb50e0a154a58 100644 --- a/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/stop-datafeed.asciidoc @@ -45,7 +45,7 @@ identifier. `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-datafeeds] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-datafeeds] `force`:: (Optional, Boolean) If true, the {dfeed} is stopped forcefully. diff --git a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc index c9b70871a485c..78b366565fbdd 100644 --- a/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/upgrade-job-model-snapshot.asciidoc @@ -43,13 +43,18 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] (Required, string) include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=snapshot-id] + +[[ml-upgrade-job-model-snapshot-query-parms]] +== {api-query-parms-title} + `timeout`:: - (Optional, time) Controls the time to wait for the request to complete. The default - value is 30 minutes. +(Optional, time) Controls the time to wait for the request to complete. The +default value is 30 minutes. 
`wait_for_completion`:: -(Optional, boolean) When true, the API won't respond until the upgrade is complete. Otherwise, -it responds as soon as the upgrade task is assigned to a node. Default is false. +(Optional, boolean) When true, the API won't respond until the upgrade is +complete. Otherwise, it responds as soon as the upgrade task is assigned to a +node. Default is false. [[ml-upgrade-job-model-snapshot-response-body]] == {api-response-body-title} diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 1d01614a35246..3a11ca6118299 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -17,7 +17,7 @@ return an error and the job waits in the `opening` state until sufficient {ml} node capacity is available. end::allow-lazy-open[] -tag::allow-no-datafeeds[] +tag::allow-no-match-datafeeds[] Specifies what to do when the request: + -- @@ -30,9 +30,9 @@ there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -end::allow-no-datafeeds[] +end::allow-no-match-datafeeds[] -tag::allow-no-jobs[] +tag::allow-no-match-jobs[] Specifies what to do when the request: + -- @@ -45,7 +45,7 @@ when there are no matches and the subset of results when there are partial matches. If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -end::allow-no-jobs[] +end::allow-no-match-jobs[] tag::allow-no-match[] Specifies what to do when the request: From cf30b54a583918f6e035d59b11e49e903df5a5a5 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 22 Nov 2021 13:43:16 -0500 Subject: [PATCH 07/88] [DOCS] Fix typo in gap_policy's default value for serial differencing aggregation (#80893) (#80912) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: James Rodewig <40268737+jrodewig@users.noreply.github.com> Co-authored-by: Simon Stücher --- .../aggregations/pipeline/serial-diff-aggregation.asciidoc | 2 +- .../main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 8b8ce8c6eece0..957b145cb3838 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -57,7 +57,7 @@ A `serial_diff` aggregation looks like this in isolation: |`buckets_path` |Path to the metric of interest (see <> for more details |Required | |`lag` |The historical bucket to subtract from the current value. E.g. a lag of 7 will subtract the current value from the value 7 buckets ago. Must be a positive, non-zero integer |Optional |`1` -|`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` +|`gap_policy` |Determines what should happen when a gap in the data is encountered. 
|Optional |`insert_zeros` |`format` |Format to apply to the output value of this aggregation |Optional | `null` |=== diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java index 87c950e54ccf9..04424f65d6c5d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/MlAggsHelper.java @@ -30,7 +30,7 @@ public static InvalidAggregationPathException invalidPathException(List /** * This extracts the bucket values as doubles from the passed aggregations. * - * The gap policy is always `INSERT_ZERO` + * The gap policy is always `INSERT_ZEROS` * @param bucketPath The bucket path from which to extract values * @param aggregations The aggregations * @return The double values and doc_counts extracted from the path if the bucket path exists and the value is a valid number From 38cbd116c92c4ff843a638a1f2c5ddfdf314802c Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 22 Nov 2021 11:34:43 -0800 Subject: [PATCH 08/88] [DOCS] Fixes query parameters for get buckets API (#80643) --- .../apis/get-bucket.asciidoc | 46 ++++++++++++++----- .../apis/get-calendar-event.asciidoc | 22 ++++++--- .../apis/get-calendar.asciidoc | 22 +++++---- .../apis/get-category.asciidoc | 11 ++++- .../apis/get-influencer.asciidoc | 15 ++++-- .../apis/get-record.asciidoc | 15 ++++-- .../apis/get-snapshot.asciidoc | 15 ++++-- 7 files changed, 106 insertions(+), 40 deletions(-) diff --git a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc index 31a1c534fa675..a234901be1c47 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-bucket.asciidoc @@ -38,15 +38,17 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-anomaly-detection] (Optional, string) The timestamp of a single bucket result. If you do not specify this parameter, the API returns information about all buckets. -[[ml-get-bucket-request-body]] -== {api-request-body-title} + +[[ml-get-bucket-query-parms]] +== {api-query-parms-title} `anomaly_score`:: (Optional, double) Returns buckets with anomaly scores greater or equal than this value. Defaults to `0.0`. `desc`:: -(Optional, Boolean) If true, the buckets are sorted in descending order. Defaults to `false`. +(Optional, Boolean) If true, the buckets are sorted in descending order. +Defaults to `false`. `end`:: (Optional, string) Returns buckets with timestamps earlier than this time. @@ -60,11 +62,12 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=exclude-interim-results] `expand`:: (Optional, Boolean) If true, the output includes anomaly records. Defaults to `false`. -`page`.`from`:: +`from`:: (Optional, integer) Skips the specified number of buckets. Defaults to `0`. -`page`.`size`:: -(Optional, integer) Specifies the maximum number of buckets to obtain. Defaults to `100`. +`size`:: +(Optional, integer) Specifies the maximum number of buckets to obtain. Defaults +to `100`. `sort`:: (Optional, string) Specifies the sort field for the requested buckets. By @@ -75,6 +78,26 @@ default, the buckets are sorted by the `timestamp` field. `-1`, which means it is unset and results are not limited to specific timestamps. 
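For example, the following request returns the ten highest-scoring buckets using only query parameters (a sketch; `low_request_rate` is the job used in the examples elsewhere in these docs):

[source,console]
--------------------------------------------------
GET _ml/anomaly_detectors/low_request_rate/results/buckets?anomaly_score=80&sort=anomaly_score&desc=true&size=10
--------------------------------------------------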
+[[ml-get-bucket-request-body]] +== {api-request-body-title} + +You can also specify the query parameters in the request body; the exception are +`from` and `size`, use `page` instead: + +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: +(Optional, integer) Skips the specified number of buckets. Defaults to `0`. + +`size`::: +(Optional, integer) Specifies the maximum number of buckets to obtain. Defaults +to `100`. +==== + [role="child_attributes"] [[ml-get-bucket-results]] == {api-response-body-title} @@ -101,15 +124,16 @@ influencer. This score might be updated as newer data is analyzed. (number) The length of the bucket in seconds. This value matches the `bucket_span` that is specified in the job. -`initial_anomaly_score`::: -(number) The score between 0-100 for each bucket influencer. This score is the -initial value that was calculated at the time the bucket was processed. - `influencer_field_name`::: (string) The field name of the influencer. - +//// `influencer_field_value`::: (string) The field value of the influencer. +//// + +`initial_anomaly_score`::: +(number) The score between 0-100 for each bucket influencer. This score is the +initial value that was calculated at the time the bucket was processed. `is_interim`::: (Boolean) diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc index 0431aa8aa4e05..e56afcce71aca 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar-event.asciidoc @@ -63,15 +63,23 @@ as the calendar identifier. [[ml-get-calendar-event-request-body]] == {api-request-body-title} -You can also specify some of the query parameters (such as `end` and -`job_id`) in the request body. +You can also specify the query parameters in the request body; the exception are +`from` and `size`, use `page` instead: -`page.from`:: - (Optional, integer) Skips the specified number of events. Defaults to `0`. +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: +(Optional, integer) Skips the specified number of events. Defaults to `0`. + +`size`::: +(Optional, integer) Specifies the maximum number of events to obtain. Defaults +to `100`. +==== -`page.size`:: - (Optional, integer) Specifies the maximum number of events to obtain. - Defaults to `100`. [[ml-get-calendar-event-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc index 331aa10286db7..e4bea16a59222 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-calendar.asciidoc @@ -53,14 +53,20 @@ or by omitting the identifier. [[ml-get-calendar-request-body]] == {api-request-body-title} -`page`.`from`:: - (Optional, integer) Skips the specified number of calendars. This object is - supported only when you omit the ``. Defaults to `0`. - -`page`.`size`:: - (Optional, integer) Specifies the maximum number of calendars to obtain. - This object is supported only when you omit the ``. Defaults - to `100`. +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: +(Optional, integer) Skips the specified number of calendars. This object is +supported only when you omit the ``. Defaults to `0`. + +`size`::: +(Optional, integer) Specifies the maximum number of calendars to obtain. 
This +object is supported only when you omit the ``. Defaults to `100`. +==== [[ml-get-calendar-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc index 37a769b838d6a..034a598b73370 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-category.asciidoc @@ -68,12 +68,19 @@ Defaults to `100`. You can also specify the `partition_field_value` query parameter in the request body. -`page`.`from`:: +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: (Optional, integer) Skips the specified number of categories. Defaults to `0`. -`page`.`size`:: +`size`::: (Optional, integer) Specifies the maximum number of categories to obtain. Defaults to `100`. +==== [[ml-get-category-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc index 1281a27073e58..76ead2921df6f 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-influencer.asciidoc @@ -74,15 +74,22 @@ timestamps. [[ml-get-influencer-request-body]] == {api-request-body-title} -You can also specify some of the query parameters (such as `desc` and -`end`) in the request body. +You can also specify the query parameters in the request body; the exception are +`from` and `size`, use `page` instead: -`page`.`from`:: +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: (Optional, integer) Skips the specified number of influencers. Defaults to `0`. -`page`.`size`:: +`size`::: (Optional, integer) Specifies the maximum number of influencers to obtain. Defaults to `100`. +==== [[ml-get-influencer-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc index bcbee61da8a69..9fac1ba37b20d 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-record.asciidoc @@ -79,15 +79,22 @@ default, the records are sorted by the `record_score` value. [[ml-get-record-request-body]] == {api-request-body-title} -You can also specify some of the query parameters (such as `desc` and -`end`) in the request body. +You can also specify the query parameters in the request body; the exception are +`from` and `size`, use `page` instead: -`page`.`from`:: +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: (Optional, integer) Skips the specified number of records. Defaults to `0`. -`page`.`size`:: +`size`::: (Optional, integer) Specifies the maximum number of records to obtain. Defaults to `100`. +==== [[ml-get-record-results]] == {api-response-body-title} diff --git a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc index 331cc51680feb..5f9eef088ee04 100644 --- a/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/get-snapshot.asciidoc @@ -67,15 +67,22 @@ specifying `*` as the snapshot ID, or by omitting the snapshot ID. [[ml-get-snapshot-request-body]] == {api-request-body-title} -You can also specify some of the query parameters (such as `desc` and -`end`) in the request body. 
+You can also specify the query parameters in the request body; the exception are +`from` and `size`, use `page` instead: -`page`.`from`:: +`page`:: ++ +.Properties of `page` +[%collapsible%open] +==== + +`from`::: (Optional, integer) Skips the specified number of snapshots. Defaults to `0`. -`page`.`size`:: +`size`::: (Optional, integer) Specifies the maximum number of snapshots to obtain. Defaults to `100`. +==== [role="child_attributes"] [[ml-get-snapshot-results]] From 46128da62c578ae6340aa1b7b3d45f7cc8f6617c Mon Sep 17 00:00:00 2001 From: Davis Plumlee <56367316+dplumlee@users.noreply.github.com> Date: Mon, 22 Nov 2021 15:01:53 -0500 Subject: [PATCH 09/88] Granting kibana_system reserved role access to "all" privileges to .internal.preview.alerts* index (#80889) --- .../core/security/authz/store/ReservedRolesStore.java | 7 +++++++ .../core/security/authz/store/ReservedRolesStoreTests.java | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 1d578fbdc2edd..2844b086e2371 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -43,6 +43,7 @@ public class ReservedRolesStore implements BiConsumer, ActionListene public static final String ALERTS_BACKING_INDEX = ".internal.alerts*"; public static final String ALERTS_INDEX_ALIAS = ".alerts*"; public static final String PREVIEW_ALERTS_INDEX_ALIAS = ".preview.alerts*"; + public static final String PREVIEW_ALERTS_BACKING_INDEX_ALIAS = ".internal.preview.alerts*"; public static final RoleDescriptor SUPERUSER_ROLE_DESCRIPTOR = new RoleDescriptor( "superuser", @@ -678,6 +679,12 @@ public static RoleDescriptor kibanaSystemRoleDescriptor(String name) { // "Alerts as data" public index alias used in Security Solution // Kibana system user uses them to read / write alerts. RoleDescriptor.IndicesPrivileges.builder().indices(ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS).privileges("all").build(), + // "Alerts as data" internal backing indices used in Security Solution + // Kibana system user creates these indices; reads / writes to them via the aliases (see below). + RoleDescriptor.IndicesPrivileges.builder() + .indices(ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS) + .privileges("all") + .build(), // Endpoint / Fleet policy responses. Kibana requires read access to send telemetry RoleDescriptor.IndicesPrivileges.builder().indices("metrics-endpoint.policy-*").privileges("read").build(), // Endpoint metrics. 
Kibana requires read access to send telemetry diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index c3f1c05141254..b27d9f0786830 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -465,7 +465,8 @@ public void testKibanaSystemRole() { ReservedRolesStore.ALERTS_LEGACY_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_BACKING_INDEX + randomAlphaOfLength(randomIntBetween(0, 13)), ReservedRolesStore.ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), - ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)) + ReservedRolesStore.PREVIEW_ALERTS_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)), + ReservedRolesStore.PREVIEW_ALERTS_BACKING_INDEX_ALIAS + randomAlphaOfLength(randomIntBetween(0, 13)) ).forEach(index -> assertAllIndicesAccessAllowed(kibanaRole, index)); // read-only index access, including cross cluster From 480fd6aa085ff070a5ec616566e58ab50191e463 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 23 Nov 2021 10:16:49 +1100 Subject: [PATCH 10/88] [DOCS] Minor clarifications in LDAP SSL docs (#80897) This commit makes a few small changes to the documentation that describes how to configure LDAP with SSL. --- .../securing-communications/tls-ldap.asciidoc | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc index 16e9006908b00..1bf8d27f955e5 100644 --- a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc @@ -2,20 +2,20 @@ [[tls-ldap]] ==== Encrypting communications between {es} and LDAP -To protect the user credentials that are sent for authentication in an LDAP -realm, it's highly recommended to encrypt communications between {es} and your -LDAP server. Connecting via SSL/TLS ensures that the identity of the LDAP server -is authenticated before {es} transmits the user credentials and the -contents of the connection are encrypted. Clients and nodes that connect via -TLS to the LDAP server need to have the LDAP server's certificate or the -server's root CA certificate installed in their keystore or truststore. - -For more information, see <>. - -. Configure the realm's TLS settings on each node to trust certificates signed -by the CA that signed your LDAP server certificates. The following example -demonstrates how to trust a CA certificate, `cacert.pem`, located within the -{es} configuration directory (ES_PATH_CONF): +To protect the user credentials that are sent for authentication in an LDAP +realm, it's highly recommended to encrypt communications between {es} and your +LDAP server. Connecting via SSL/TLS ensures that the identity of the LDAP server +is authenticated before {es} transmits the user credentials and the +contents of the connection are encrypted. Clients and nodes that connect via +TLS to the LDAP server need to have the LDAP server's certificate or the +server's root CA certificate installed in their keystore or truststore.
+ +For more information, see <>. + +. Configure the realm's TLS settings on each node to trust certificates signed +by the CA that signed your LDAP server certificates. The following example +demonstrates how to trust a CA certificate, `cacert.pem`, located within the +{es} <>: + -- [source,shell] @@ -29,13 +29,16 @@ xpack: order: 0 url: "ldaps://ldap.example.com:636" ssl: - certificate_authorities: [ "ES_PATH_CONF/cacert.pem" ] + certificate_authorities: [ "cacert.pem" ] -------------------------------------------------- -The CA certificate must be a PEM encoded. +In the example above, the CA certificate must be PEM encoded. + +PKCS#12 and JKS files are also supported - see the description of +`ssl.truststore.path` in <>. NOTE: You can also specify the individual server certificates rather than the CA -certificate, but this is only recommended if you have a single LDAP server or +certificate, but this is only recommended if you have a single LDAP server or the certificates are self-signed. -- From 564ff9db88f6f4a4ad0c4b5cbd3b33dcf6ce8da1 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 23 Nov 2021 05:01:08 +0100 Subject: [PATCH 11/88] Extract more standard metadata from binary files (#78754) Until now, we have extracted only a small number of fields from the binary files sent to the ingest attachment plugin: * `content`, * `title`, * `author`, * `keywords`, * `date`, * `content_type`, * `content_length`, * `language`. Tika has a list of more standard properties which can be extracted: * `modified`, * `format`, * `identifier`, * `contributor`, * `coverage`, * `modifier`, * `creator_tool`, * `publisher`, * `relation`, * `rights`, * `source`, * `type`, * `description`, * `print_date`, * `metadata_date`, * `latitude`, * `longitude`, * `altitude`, * `rating`, * `comments` This commit exposes those new fields. Related to #22339. Co-authored-by: Keith Massey --- docs/plugins/ingest-attachment.asciidoc | 34 +++++++ plugins/ingest-attachment/build.gradle | 6 ++ .../attachment/AttachmentProcessor.java | 98 ++++++++++++------- .../attachment/AttachmentProcessorTests.java | 88 +++++++++++++++-- .../ingest_attachment/30_files_supported.yml | 14 ++- 5 files changed, 197 insertions(+), 43 deletions(-) diff --git a/docs/plugins/ingest-attachment.asciidoc b/docs/plugins/ingest-attachment.asciidoc index 50711023f93ba..364e0cb9b5564 100644 --- a/docs/plugins/ingest-attachment.asciidoc +++ b/docs/plugins/ingest-attachment.asciidoc @@ -98,6 +98,40 @@ The document's `attachment` object contains extracted properties for the file: NOTE: Keeping the binary as a field within the document might consume a lot of resources. It is highly recommended to remove that field from the document. Set `remove_binary` to `true` to automatically remove the field.
+[[ingest-attachment-fields]] +==== Exported fields + +The fields which might be extracted from a document are: + +* `content`, +* `title`, +* `author`, +* `keywords`, +* `date`, +* `content_type`, +* `content_length`, +* `language`, +* `modified`, +* `format`, +* `identifier`, +* `contributor`, +* `coverage`, +* `modifier`, +* `creator_tool`, +* `publisher`, +* `relation`, +* `rights`, +* `source`, +* `type`, +* `description`, +* `print_date`, +* `metadata_date`, +* `latitude`, +* `longitude`, +* `altitude`, +* `rating`, +* `comments` + To extract only certain `attachment` fields, specify the `properties` array: [source,console] diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 94cbc91b49336..87afeae8fc4de 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -86,6 +86,12 @@ tasks.named("forbiddenPatterns").configure { exclude '**/text-cjk-*.txt' } +tasks.named("yamlRestTestV7CompatTransform").configure { task -> + // 2 new tika metadata fields are returned in v8 + task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .doc file") + task.replaceValueInLength("_source.attachment", 8, "Test ingest attachment processor with .docx file") +} + tasks.named("thirdPartyAudit").configure { ignoreMissingClasses() } diff --git a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java index 44fc9a77ffd32..916b7502b1cdf 100644 --- a/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java +++ b/plugins/ingest-attachment/src/main/java/org/elasticsearch/ingest/attachment/AttachmentProcessor.java @@ -11,6 +11,7 @@ import org.apache.tika.exception.ZeroByteFileException; import org.apache.tika.language.LanguageIdentifier; import org.apache.tika.metadata.Metadata; +import org.apache.tika.metadata.Office; import org.apache.tika.metadata.TikaCoreProperties; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; @@ -132,40 +133,11 @@ public IngestDocument execute(IngestDocument ingestDocument) { additionalFields.put(Property.LANGUAGE.toLowerCase(), language); } - if (properties.contains(Property.DATE)) { - String createdDate = metadata.get(TikaCoreProperties.CREATED); - if (createdDate != null) { - additionalFields.put(Property.DATE.toLowerCase(), createdDate); - } - } - - if (properties.contains(Property.TITLE)) { - String title = metadata.get(TikaCoreProperties.TITLE); - if (Strings.hasLength(title)) { - additionalFields.put(Property.TITLE.toLowerCase(), title); - } - } - - if (properties.contains(Property.AUTHOR)) { - String author = metadata.get("Author"); - if (Strings.hasLength(author)) { - additionalFields.put(Property.AUTHOR.toLowerCase(), author); - } - } - - if (properties.contains(Property.KEYWORDS)) { - String keywords = metadata.get("Keywords"); - if (Strings.hasLength(keywords)) { - additionalFields.put(Property.KEYWORDS.toLowerCase(), keywords); - } - } - - if (properties.contains(Property.CONTENT_TYPE)) { - String contentType = metadata.get(Metadata.CONTENT_TYPE); - if (Strings.hasLength(contentType)) { - additionalFields.put(Property.CONTENT_TYPE.toLowerCase(), contentType); - } - } + addAdditionalField(additionalFields, Property.DATE, metadata.get(TikaCoreProperties.CREATED)); + addAdditionalField(additionalFields, Property.TITLE, 
metadata.get(TikaCoreProperties.TITLE)); + addAdditionalField(additionalFields, Property.AUTHOR, metadata.get("Author")); + addAdditionalField(additionalFields, Property.KEYWORDS, metadata.get("Keywords")); + addAdditionalField(additionalFields, Property.CONTENT_TYPE, metadata.get(Metadata.CONTENT_TYPE)); if (properties.contains(Property.CONTENT_LENGTH)) { String contentLength = metadata.get(Metadata.CONTENT_LENGTH); @@ -178,6 +150,30 @@ public IngestDocument execute(IngestDocument ingestDocument) { additionalFields.put(Property.CONTENT_LENGTH.toLowerCase(), length); } + addAdditionalField(additionalFields, Property.AUTHOR, metadata.get(TikaCoreProperties.CREATOR)); + addAdditionalField(additionalFields, Property.KEYWORDS, metadata.get(Office.KEYWORDS)); + + addAdditionalField(additionalFields, Property.MODIFIED, metadata.get(TikaCoreProperties.MODIFIED)); + addAdditionalField(additionalFields, Property.FORMAT, metadata.get(TikaCoreProperties.FORMAT)); + addAdditionalField(additionalFields, Property.IDENTIFIER, metadata.get(TikaCoreProperties.IDENTIFIER)); + addAdditionalField(additionalFields, Property.CONTRIBUTOR, metadata.get(TikaCoreProperties.CONTRIBUTOR)); + addAdditionalField(additionalFields, Property.COVERAGE, metadata.get(TikaCoreProperties.COVERAGE)); + addAdditionalField(additionalFields, Property.MODIFIER, metadata.get(TikaCoreProperties.MODIFIER)); + addAdditionalField(additionalFields, Property.CREATOR_TOOL, metadata.get(TikaCoreProperties.CREATOR_TOOL)); + addAdditionalField(additionalFields, Property.PUBLISHER, metadata.get(TikaCoreProperties.PUBLISHER)); + addAdditionalField(additionalFields, Property.RELATION, metadata.get(TikaCoreProperties.RELATION)); + addAdditionalField(additionalFields, Property.RIGHTS, metadata.get(TikaCoreProperties.RIGHTS)); + addAdditionalField(additionalFields, Property.SOURCE, metadata.get(TikaCoreProperties.SOURCE)); + addAdditionalField(additionalFields, Property.TYPE, metadata.get(TikaCoreProperties.TYPE)); + addAdditionalField(additionalFields, Property.DESCRIPTION, metadata.get(TikaCoreProperties.DESCRIPTION)); + addAdditionalField(additionalFields, Property.PRINT_DATE, metadata.get(TikaCoreProperties.PRINT_DATE)); + addAdditionalField(additionalFields, Property.METADATA_DATE, metadata.get(TikaCoreProperties.METADATA_DATE)); + addAdditionalField(additionalFields, Property.LATITUDE, metadata.get(TikaCoreProperties.LATITUDE)); + addAdditionalField(additionalFields, Property.LONGITUDE, metadata.get(TikaCoreProperties.LONGITUDE)); + addAdditionalField(additionalFields, Property.ALTITUDE, metadata.get(TikaCoreProperties.ALTITUDE)); + addAdditionalField(additionalFields, Property.RATING, metadata.get(TikaCoreProperties.RATING)); + addAdditionalField(additionalFields, Property.COMMENTS, metadata.get(TikaCoreProperties.COMMENTS)); + ingestDocument.setFieldValue(targetField, additionalFields); if (removeBinary) { @@ -186,6 +182,18 @@ public IngestDocument execute(IngestDocument ingestDocument) { return ingestDocument; } + /** + * Add an additional field if not null or empty + * @param additionalFields additional fields + * @param property property to add + * @param value value to add + */ + private void addAdditionalField(Map additionalFields, Property property, String value) { + if (properties.contains(property) && Strings.hasLength(value)) { + additionalFields.put(property.toLowerCase(), value); + } + } + @Override public String getType() { return TYPE; @@ -270,7 +278,27 @@ enum Property { DATE, CONTENT_TYPE, CONTENT_LENGTH, - LANGUAGE; + 
LANGUAGE, + MODIFIED, + FORMAT, + IDENTIFIER, + CONTRIBUTOR, + COVERAGE, + MODIFIER, + CREATOR_TOOL, + PUBLISHER, + RELATION, + RIGHTS, + SOURCE, + TYPE, + DESCRIPTION, + PRINT_DATE, + METADATA_DATE, + LATITUDE, + LONGITUDE, + ALTITUDE, + RATING, + COMMENTS; public static Property parse(String value) { return valueOf(value.toUpperCase(Locale.ROOT)); diff --git a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java index 1758396819822..1fead50a600e7 100644 --- a/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java +++ b/plugins/ingest-attachment/src/test/java/org/elasticsearch/ingest/attachment/AttachmentProcessorTests.java @@ -68,9 +68,20 @@ public void testEnglishTextDocument() throws Exception { } public void testHtmlDocumentWithRandomFields() throws Exception { - // date is not present in the html doc + // some metadata fields are not present in the html doc + // "date", "metadata_date", "comments", "modified", "modifier", "print_date", "relation", "creator_tool", "altitude" + // "identifier", "longitude", "publisher", "description", "latitude", "format", "source", "coverage" + // "rating", "type", "contributor", "rights" + // we only test with content, title, author, keywords, content_type and content_length. ArrayList fieldsList = new ArrayList<>( - EnumSet.complementOf(EnumSet.of(AttachmentProcessor.Property.DATE)) + EnumSet.of( + AttachmentProcessor.Property.CONTENT, + AttachmentProcessor.Property.TITLE, + AttachmentProcessor.Property.AUTHOR, + AttachmentProcessor.Property.KEYWORDS, + AttachmentProcessor.Property.CONTENT_TYPE, + AttachmentProcessor.Property.CONTENT_LENGTH + ) ); Set selectedProperties = new HashSet<>(); @@ -128,7 +139,20 @@ public void testEmptyTextDocument() throws Exception { public void testWordDocument() throws Exception { Map attachmentData = parseDocument("issue-104.docx", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); + assertThat( + attachmentData.keySet(), + containsInAnyOrder( + "content", + "language", + "date", + "author", + "content_type", + "content_length", + "modifier", + "modified", + "publisher" + ) + ); assertThat(attachmentData.get("content"), is(notNullValue())); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2012-10-12T11:17:00Z")); @@ -138,12 +162,28 @@ public void testWordDocument() throws Exception { attachmentData.get("content_type").toString(), is("application/vnd.openxmlformats-officedocument.wordprocessingml.document") ); + assertThat(attachmentData.get("modifier").toString(), is("Luka Lampret")); + assertThat(attachmentData.get("modified").toString(), is("2015-02-20T11:36:00Z")); + assertThat(attachmentData.get("publisher").toString(), is("JDI")); } public void testWordDocumentWithVisioSchema() throws Exception { Map attachmentData = parseDocument("issue-22077.docx", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); + assertThat( + attachmentData.keySet(), + containsInAnyOrder( + "content", + "language", + "date", + "author", + "content_type", + "content_length", + "modifier", + "modified", + "print_date" + ) + ); assertThat(attachmentData.get("content").toString(), containsString("Table
of Contents")); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2015-01-06T18:07:00Z")); @@ -153,18 +193,37 @@ public void testWordDocumentWithVisioSchema() throws Exception { attachmentData.get("content_type").toString(), is("application/vnd.openxmlformats-officedocument.wordprocessingml.document") ); + assertThat(attachmentData.get("modifier").toString(), is("Chris Dufour")); + assertThat(attachmentData.get("modified").toString(), is("2016-12-04T16:58:00Z")); + assertThat(attachmentData.get("print_date").toString(), is("2015-01-05T19:12:00Z")); } public void testLegacyWordDocumentWithVisioSchema() throws Exception { Map attachmentData = parseDocument("issue-22077.doc", processor); - assertThat(attachmentData.keySet(), containsInAnyOrder("content", "language", "date", "author", "content_type", "content_length")); + assertThat( + attachmentData.keySet(), + containsInAnyOrder( + "content", + "language", + "date", + "author", + "content_type", + "content_length", + "modifier", + "modified", + "print_date" + ) + ); assertThat(attachmentData.get("content").toString(), containsString("Table of Contents")); assertThat(attachmentData.get("language"), is("en")); assertThat(attachmentData.get("date"), is("2016-12-16T15:04:00Z")); assertThat(attachmentData.get("author"), is(notNullValue())); assertThat(attachmentData.get("content_length"), is(notNullValue())); assertThat(attachmentData.get("content_type").toString(), is("application/msword")); + assertThat(attachmentData.get("modifier").toString(), is("David Pilato")); + assertThat(attachmentData.get("modified").toString(), is("2016-12-16T15:04:00Z")); + assertThat(attachmentData.get("print_date").toString(), is("2015-01-05T19:12:00Z")); } public void testPdf() throws Exception { @@ -217,9 +276,26 @@ public void testEpubDocument() throws Exception { assertThat( attachmentData.keySet(), - containsInAnyOrder("language", "content", "author", "title", "content_type", "content_length", "date", "keywords") + containsInAnyOrder( + "language", + "content", + "author", + "title", + "content_type", + "content_length", + "date", + "keywords", + "identifier", + "contributor", + "publisher", + "description" + ) ); assertThat(attachmentData.get("content_type").toString(), containsString("application/epub+zip")); + assertThat(attachmentData.get("identifier").toString(), is("1234567890")); + assertThat(attachmentData.get("contributor").toString(), is("no-one")); + assertThat(attachmentData.get("publisher").toString(), is("Apache")); + assertThat(attachmentData.get("description").toString(), is("This is an ePub test publication for Tika.")); } // no real detection, just rudimentary diff --git a/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yml b/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yml index 543f394782da9..324776bc20f87 100644 --- a/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yml +++ b/plugins/ingest-attachment/src/yamlRestTest/resources/rest-api-spec/test/ingest_attachment/30_files_supported.yml @@ -1,5 +1,8 @@ --- "Test ingest attachment processor with .doc file": + - skip: + version: " - 7.99.99" + reason: "new fields added in 8.0.0" - do: ingest.put_pipeline: id: "my_pipeline" @@ -27,17 +30,22 @@ get: index: test id: 1 - - length: { _source.attachment: 6 } + - length: { _source.attachment: 8 } - match: { 
_source.attachment.content: "Test elasticsearch" } - match: { _source.attachment.language: "et" } - match: { _source.attachment.author: "David Pilato" } - match: { _source.attachment.date: "2016-03-10T08:25:00Z" } - match: { _source.attachment.content_length: 19 } - match: { _source.attachment.content_type: "application/msword" } + - match: { _source.attachment.modifier: "David Pilato" } + - match: { _source.attachment.modified: "2016-03-10T08:25:00Z" } --- "Test ingest attachment processor with .docx file": + - skip: + version: " - 7.99.99" + reason: "new fields added in 8.0.0" - do: ingest.put_pipeline: id: "my_pipeline" @@ -65,10 +73,12 @@ get: index: test id: 1 - - length: { _source.attachment: 6 } + - length: { _source.attachment: 8 } - match: { _source.attachment.content: "Test elasticsearch" } - match: { _source.attachment.language: "et" } - match: { _source.attachment.author: "David Pilato" } - match: { _source.attachment.date: "2016-03-10T08:24:00Z" } - match: { _source.attachment.content_length: 19 } - match: { _source.attachment.content_type: "application/vnd.openxmlformats-officedocument.wordprocessingml.document" } + - match: { _source.attachment.modifier: "David Pilato" } + - match: { _source.attachment.modified: "2016-03-10T08:24:00Z" } From 6de5edd70d4e322d3c86b5c0fb4fa256fc17aa0f Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Tue, 23 Nov 2021 09:11:11 +0100 Subject: [PATCH 12/88] Fix several potential circuit breaker leaks in Aggregators (#79676) This commit adds a new CircuitBreaker implementation to the test framework that randomly throws CircuitBreakingExceptions. This new circuit breaker helps uncover several places where we might leak resources if the circuit breaker throws such an exception. --- .../bucket/BucketsAggregator.java | 9 +- .../bucket/composite/BinaryValuesSource.java | 10 +- .../bucket/composite/CompositeAggregator.java | 4 +- .../bucket/composite/DoubleValuesSource.java | 10 +- .../bucket/composite/LongValuesSource.java | 10 +- .../AutoDateHistogramAggregator.java | 18 ++- .../VariableWidthHistogramAggregator.java | 11 +- .../bucket/terms/BytesKeyedBucketOrds.java | 10 +- .../GlobalOrdinalsStringTermsAggregator.java | 11 +- .../terms/MapStringTermsAggregator.java | 14 +- .../bucket/terms/NumericTermsAggregator.java | 10 +- .../bucket/terms/SignificanceLookup.java | 92 ++++++++----- .../SignificantTextAggregatorFactory.java | 50 ++++--- .../search/sort/BucketedSort.java | 33 ++++- .../aggregations/AggregatorTestCase.java | 126 +++++++++++++++++- .../topmetrics/TopMetricsAggregator.java | 4 +- .../TopMetricsAggregatorFactory.java | 44 ++++-- .../analytics/ttest/TTestStatsBuilder.java | 16 ++- 18 files changed, 391 insertions(+), 91 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 8be166c0fe4a5..449c46078d37b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -99,8 +99,11 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do * If a bucket's ordinal is mapped to -1 then the bucket is removed entirely.
*/ public final void rewriteBuckets(long newNumBuckets, LongUnaryOperator mergeMap) { - try (LongArray oldDocCounts = docCounts) { + LongArray oldDocCounts = docCounts; + boolean success = false; + try { docCounts = bigArrays().newLongArray(newNumBuckets, true); + success = true; docCounts.fill(0, newNumBuckets, 0); for (long i = 0; i < oldDocCounts.size(); i++) { long docCount = oldDocCounts.get(i); @@ -113,6 +116,10 @@ public final void rewriteBuckets(long newNumBuckets, LongUnaryOperator mergeMap) docCounts.increment(destinationOrdinal, docCount); } } + } finally { + if (success) { + oldDocCounts.close(); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java index 8d8cf61353ebf..8adb8c9b364fc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -52,7 +52,15 @@ class BinaryValuesSource extends SingleDimensionValuesSource { this.breakerConsumer = breakerConsumer; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newObjectArray(Math.min(size, 100)); - this.valueBuilders = bigArrays.newObjectArray(Math.min(size, 100)); + boolean success = false; + try { + this.valueBuilders = bigArrays.newObjectArray(Math.min(size, 100)); + success = true; + } finally { + if (success == false) { + close(); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index da4f3669c09eb..63bc10a7e9e29 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -149,7 +149,9 @@ protected void doClose() { try { Releasables.close(queue); } finally { - Releasables.close(sources); + if (sources != null) { + Releasables.close(sources); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index bf75097beca9a..e1b15f0db93ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -46,7 +46,15 @@ class DoubleValuesSource extends SingleDimensionValuesSource { super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); this.docValuesFunc = docValuesFunc; this.bits = this.missingBucket ? 
new BitArray(100, bigArrays) : null; - this.values = bigArrays.newDoubleArray(Math.min(size, 100), false); + boolean success = false; + try { + this.values = bigArrays.newDoubleArray(Math.min(size, 100), false); + success = true; + } finally { + if (success == false) { + close(); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 5bc9ee0906fe8..821958f10b3ef 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -64,7 +64,15 @@ class LongValuesSource extends SingleDimensionValuesSource { this.docValuesFunc = docValuesFunc; this.rounding = rounding; this.bits = missingBucket ? new BitArray(Math.min(size, 100), bigArrays) : null; - this.values = bigArrays.newLongArray(Math.min(size, 100), false); + boolean success = false; + try { + this.values = bigArrays.newLongArray(Math.min(size, 100), false); + success = true; + } finally { + if (success == false) { + close(); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index ebfc2dcbbfa1b..748f05aef67e7 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -276,10 +276,13 @@ private void increaseRoundingIfNeeded(long rounded) { return; } do { - try (LongKeyedBucketOrds oldOrds = bucketOrds) { + LongKeyedBucketOrds oldOrds = bucketOrds; + boolean success = false; + try { preparedRounding = prepareRounding(++roundingIdx); long[] mergeMap = new long[Math.toIntExact(oldOrds.size())]; bucketOrds = new LongKeyedBucketOrds.FromSingle(bigArrays()); + success = true; // now it is safe to close oldOrds after we finish LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = oldOrds.ordsEnum(0); while (ordsEnum.next()) { long oldKey = ordsEnum.value(); @@ -288,6 +291,10 @@ private void increaseRoundingIfNeeded(long rounded) { mergeMap[(int) ordsEnum.ord()] = newBucketOrd >= 0 ? 
newBucketOrd : -1 - newBucketOrd; } merge(mergeMap, bucketOrds.size()); + } finally { + if (success) { + oldOrds.close(); + } } } while (roundingIdx < roundingInfos.length - 1 && (bucketOrds.size() > targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval() @@ -527,9 +534,12 @@ private int increaseRoundingIfNeeded(long owningBucketOrd, int oldEstimatedBucke private void rebucket() { rebucketCount++; - try (LongKeyedBucketOrds oldOrds = bucketOrds) { + LongKeyedBucketOrds oldOrds = bucketOrds; + boolean success = false; + try { long[] mergeMap = new long[Math.toIntExact(oldOrds.size())]; bucketOrds = new LongKeyedBucketOrds.FromMany(bigArrays()); + success = true; for (long owningBucketOrd = 0; owningBucketOrd <= oldOrds.maxOwningBucketOrd(); owningBucketOrd++) { LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = oldOrds.ordsEnum(owningBucketOrd); Rounding.Prepared preparedRounding = preparedRoundings[roundingIndexFor(owningBucketOrd)]; @@ -543,6 +553,10 @@ private void rebucket() { liveBucketCountUnderestimate.set(owningBucketOrd, Math.toIntExact(bucketOrds.bucketsInOrd(owningBucketOrd))); } merge(mergeMap, bucketOrds.size()); + } finally { + if (success) { + oldOrds.close(); + } } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java index 6802fcd8ea2bc..299c67fec4995 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregator.java @@ -159,7 +159,16 @@ private class MergeBucketsPhase extends CollectionPhase { MergeBucketsPhase(DoubleArray buffer, int bufferSize) { // Cluster the documents to reduce the number of buckets - bucketBufferedDocs(buffer, bufferSize, mergePhaseInitialBucketCount(shardSize)); + boolean success = false; + try { + bucketBufferedDocs(buffer, bufferSize, mergePhaseInitialBucketCount(shardSize)); + success = true; + } finally { + if (success == false) { + close(); + clusterMaxes = clusterMins = clusterCentroids = clusterSizes = null; + } + } if (bufferSize > 1) { updateAvgBucketDistance(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java index 680e45326e0f5..e924b93a52407 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/BytesKeyedBucketOrds.java @@ -159,7 +159,15 @@ private static class FromMany extends BytesKeyedBucketOrds { private FromMany(BigArrays bigArrays) { bytesToLong = new BytesRefHash(1, bigArrays); - longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY); + boolean success = false; + try { + longToBucketOrds = LongKeyedBucketOrds.build(bigArrays, CardinalityUpperBound.MANY); + success = true; + } finally { + if (success == false) { + close(); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index cdba34a831ad2..1bc81b03ff543 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -812,7 +812,7 @@ class SignificantTermsResults extends ResultStrategy< private final long supersetSize; private final SignificanceHeuristic significanceHeuristic; - private LongArray subsetSizes = bigArrays().newLongArray(1, true); + private LongArray subsetSizes; SignificantTermsResults( SignificanceLookup significanceLookup, @@ -822,6 +822,15 @@ class SignificantTermsResults extends ResultStrategy< backgroundFrequencies = significanceLookup.bytesLookup(bigArrays(), cardinality); supersetSize = significanceLookup.supersetSize(); this.significanceHeuristic = significanceHeuristic; + boolean success = false; + try { + subsetSizes = bigArrays().newLongArray(1, true); + success = true; + } finally { + if (success == false) { + close(); + } + } } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java index 8ba01b3fae754..dbcdde4ed1e11 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/MapStringTermsAggregator.java @@ -69,10 +69,11 @@ public MapStringTermsAggregator( Map metadata ) throws IOException { super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata); - this.collectorSource = collectorSource; this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job. this.includeExclude = includeExclude; bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality); + // set last because if there is an error during construction the collector gets released outside the constructor. + this.collectorSource = collectorSource; } @Override @@ -478,7 +479,7 @@ class SignificantTermsResults extends ResultStrategy a.new SignificantTermsResults(lookup, significanceHeuristic, cardinality), - null, - DocValueFormat.RAW, - bucketCountThresholds, - incExcFilter, - context, - parent, - SubAggCollectionMode.BREADTH_FIRST, - false, - cardinality, - metadata - ); + final IncludeExclude.StringFilter incExcFilter = includeExclude == null + ?
null + : includeExclude.convertToStringFilter(DocValueFormat.RAW); + + final SignificanceLookup lookup = new SignificanceLookup(context, fieldType, DocValueFormat.RAW, backgroundFilter); + final CollectorSource collectorSource = createCollectorSource(); + boolean success = false; + try { + final MapStringTermsAggregator mapStringTermsAggregator = new MapStringTermsAggregator( + name, + factories, + collectorSource, + a -> a.new SignificantTermsResults(lookup, significanceHeuristic, cardinality), + null, + DocValueFormat.RAW, + bucketCountThresholds, + incExcFilter, + context, + parent, + SubAggCollectionMode.BREADTH_FIRST, + false, + cardinality, + metadata + ); + success = true; + return mapStringTermsAggregator; + } finally { + if (success == false) { + Releasables.close(collectorSource); + } + } } /** diff --git a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java index ad0e704665cd5..f3f15111f8e9b 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java +++ b/server/src/main/java/org/elasticsearch/search/sort/BucketedSort.java @@ -453,10 +453,19 @@ private ExtraData.Loader loader() throws IOException { * Superclass for implementations of {@linkplain BucketedSort} for {@code double} keys. */ public abstract static class ForDoubles extends BucketedSort { - private DoubleArray values = bigArrays.newDoubleArray(getBucketSize(), false); + private DoubleArray values; public ForDoubles(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) { super(bigArrays, sortOrder, format, bucketSize, extra); + boolean success = false; + try { + values = bigArrays.newDoubleArray(getBucketSize(), false); + success = true; + } finally { + if (success == false) { + close(); + } + } initGatherOffsets(); } @@ -544,7 +553,7 @@ public abstract static class ForFloats extends BucketedSort { */ public static final int MAX_BUCKET_SIZE = (int) Math.pow(2, 24); - private FloatArray values = bigArrays.newFloatArray(1, false); + private FloatArray values; public ForFloats(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) { super(bigArrays, sortOrder, format, bucketSize, extra); @@ -552,6 +561,15 @@ public ForFloats(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format close(); throw new IllegalArgumentException("bucket size must be less than [2^24] but was [" + bucketSize + "]"); } + boolean success = false; + try { + values = bigArrays.newFloatArray(1, false); + success = true; + } finally { + if (success == false) { + close(); + } + } initGatherOffsets(); } @@ -626,10 +644,19 @@ protected final boolean docBetterThan(long index) { * Superclass for implementations of {@linkplain BucketedSort} for {@code long} keys. 
*/ public abstract static class ForLongs extends BucketedSort { - private LongArray values = bigArrays.newLongArray(1, false); + private LongArray values; public ForLongs(BigArrays bigArrays, SortOrder sortOrder, DocValueFormat format, int bucketSize, ExtraData extra) { super(bigArrays, sortOrder, format, bucketSize, extra); + boolean success = false; + try { + values = bigArrays.newLongArray(1, false); + success = true; + } finally { + if (success == false) { + close(); + } + } initGatherOffsets(); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 533e56e6936ef..41fc2ff803634 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.TriConsumer; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -94,7 +95,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.indices.breaker.AllCircuitBreakerStats; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.breaker.CircuitBreakerStats; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -471,11 +474,13 @@ protected A searchAndReduc * Collects all documents that match the provided query {@link Query} and * returns the reduced {@link InternalAggregation}. * + * It also runs the aggregation using a circuit breaker that randomly throws {@link CircuitBreakingException} + * in order to make sure the implementation does not leak. + * * @param splitLeavesIntoSeparateAggregators If true this creates a new {@link Aggregator} * for each leaf as though it were a separate index. If false this aggregates * all leaves together, like we do in production. */ - @SuppressWarnings("unchecked") protected A searchAndReduce( IndexSettings indexSettings, IndexSearcher searcher, @@ -484,12 +489,57 @@ protected A searchAndReduc int maxBucket, boolean splitLeavesIntoSeparateAggregators, MappedFieldType...
fieldTypes + ) throws IOException { + // First run it to find circuit breaker leaks on the aggregator + CircuitBreakerService crankyService = new CrankyCircuitBreakerService(); + for (int i = 0; i < 5; i++) { + try { + searchAndReduce( + indexSettings, + searcher, + query, + builder, + maxBucket, + splitLeavesIntoSeparateAggregators, + crankyService, + fieldTypes + ); + } catch (CircuitBreakingException e) { + // expected + } catch (IOException e) { + throw e; + } + } + // Second run it to the end + CircuitBreakerService breakerService = new NoneCircuitBreakerService(); + return searchAndReduce( + indexSettings, + searcher, + query, + builder, + maxBucket, + splitLeavesIntoSeparateAggregators, + breakerService, + fieldTypes + ); + } + + @SuppressWarnings("unchecked") + private A searchAndReduce( + IndexSettings indexSettings, + IndexSearcher searcher, + Query query, + AggregationBuilder builder, + int maxBucket, + boolean splitLeavesIntoSeparateAggregators, + CircuitBreakerService breakerService, + MappedFieldType... fieldTypes ) throws IOException { final IndexReaderContext ctx = searcher.getTopReaderContext(); final PipelineTree pipelines = builder.buildPipelineTree(); List aggs = new ArrayList<>(); Query rewritten = searcher.rewrite(query); - CircuitBreakerService breakerService = new NoneCircuitBreakerService(); + AggregationContext context = createAggregationContext( searcher, indexSettings, @@ -1313,4 +1363,76 @@ public List getAggregations() { ); } } + + private static class CrankyCircuitBreakerService extends CircuitBreakerService { + + private final CircuitBreaker breaker = new CircuitBreaker() { + @Override + public void circuitBreak(String fieldName, long bytesNeeded) { + + } + + @Override + public void addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException { + if (random().nextInt(20) == 0) { + throw new CircuitBreakingException("fake error", Durability.PERMANENT); + } + } + + @Override + public void addWithoutBreaking(long bytes) { + + } + + @Override + public long getUsed() { + return 0; + } + + @Override + public long getLimit() { + return 0; + } + + @Override + public double getOverhead() { + return 0; + } + + @Override + public long getTrippedCount() { + return 0; + } + + @Override + public String getName() { + return CircuitBreaker.FIELDDATA; + } + + @Override + public Durability getDurability() { + return null; + } + + @Override + public void setLimitAndOverhead(long limit, double overhead) { + + } + }; + + @Override + public CircuitBreaker getBreaker(String name) { + return breaker; + } + + @Override + public AllCircuitBreakerStats stats() { + return new AllCircuitBreakerStats(new CircuitBreakerStats[] { stats(CircuitBreaker.FIELDDATA) }); + } + + @Override + public CircuitBreakerStats stats(String name) { + return new CircuitBreakerStats(CircuitBreaker.FIELDDATA, -1, -1, 0, 0); + } + } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java index 575be34199aba..7a3d8a497ea18 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregator.java @@ -76,7 +76,8 @@ class TopMetricsAggregator extends NumericMetricsAggregator.MultiValue { ) throws IOException { super(name, context, parent, metadata); 
this.size = size; - this.metrics = new TopMetricsAggregator.Metrics(metricValues); + // In case of failure we are releasing these objects outside, therefore we need to set the field at the end. + TopMetricsAggregator.Metrics metrics = new TopMetricsAggregator.Metrics(metricValues); /* * If we're only collecting a single value then only provided *that* * value to the sort so that swaps and loads are just a little faster */ BucketedSort.ExtraData values = metrics.values.length == 1 ? metrics.values[0] : metrics; this.sort = context.buildBucketedSort(sort, size, values); + this.metrics = metrics; } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorFactory.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorFactory.java index e79dd650219ec..199031f3f4e72 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorFactory.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregatorFactory.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.core.Releasables; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; @@ -77,21 +78,38 @@ protected TopMetricsAggregator createInternal(Aggregator parent, CardinalityUppe ); } MetricValues[] metricValues = new MetricValues[metricFields.size()]; - for (int i = 0; i < metricFields.size(); i++) { - MultiValuesSourceFieldConfig config = metricFields.get(i); - ValuesSourceConfig vsConfig = ValuesSourceConfig.resolve( + boolean success = false; + try { + for (int i = 0; i < metricFields.size(); i++) { + MultiValuesSourceFieldConfig config = metricFields.get(i); + ValuesSourceConfig vsConfig = ValuesSourceConfig.resolve( + context, + null, + config.getFieldName(), + config.getScript(), + config.getMissing(), + config.getTimeZone(), + null, + CoreValuesSourceType.NUMERIC + ); + MetricValuesSupplier supplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, vsConfig); + metricValues[i] = supplier.build(size, context.bigArrays(), config.getFieldName(), vsConfig); + } + TopMetricsAggregator aggregator = new TopMetricsAggregator( + name, context, - null, - config.getFieldName(), - config.getScript(), - config.getMissing(), - config.getTimeZone(), - null, - CoreValuesSourceType.NUMERIC + parent, + metadata, + size, + sortBuilders.get(0), + metricValues ); - MetricValuesSupplier supplier = context.getValuesSourceRegistry().getAggregator(REGISTRY_KEY, vsConfig); - metricValues[i] = supplier.build(size, context.bigArrays(), config.getFieldName(), vsConfig); + success = true; + return aggregator; + } finally { + if (success == false) { + Releasables.close(metricValues); + } } - return new TopMetricsAggregator(name, context, parent, metadata, size, sortBuilders.get(0), metricValues); } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java index 25755a745a387..8e85d16e01f9f 100644 ---
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestStatsBuilder.java @@ -24,10 +24,18 @@ public class TTestStatsBuilder implements Releasable { TTestStatsBuilder(BigArrays bigArrays) { counts = bigArrays.newLongArray(1, true); - sums = bigArrays.newDoubleArray(1, true); - compensations = bigArrays.newDoubleArray(1, true); - sumOfSqrs = bigArrays.newDoubleArray(1, true); - sumOfSqrCompensations = bigArrays.newDoubleArray(1, true); + boolean success = false; + try { + sums = bigArrays.newDoubleArray(1, true); + compensations = bigArrays.newDoubleArray(1, true); + sumOfSqrs = bigArrays.newDoubleArray(1, true); + sumOfSqrCompensations = bigArrays.newDoubleArray(1, true); + success = true; + } finally { + if (success == false) { + close(); + } + } } public TTestStats get(long bucket) { From 474bac80103243e989b04654a50a91c0d656c0b6 Mon Sep 17 00:00:00 2001 From: weizijun Date: Tue, 23 Nov 2021 16:34:15 +0800 Subject: [PATCH 13/88] add ignore info (#80924) As https://github.com/elastic/elasticsearch/issues/80918 mentions, org.elasticsearch.index.TimeSeriesModeIT.testAddTimeStampMeta fails randomly. I ran the failing gradle command, but it passed. For now I am ignoring the test, and will continue to investigate what is wrong. --- .../java/org/elasticsearch/index/TimeSeriesModeIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java index 74f58fe2364ae..657772a4d3e58 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.index; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.DocWriteResponse.Result; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.index.IndexResponse; @@ -486,6 +487,7 @@ public void testEnabledTimeStampMapper() throws IOException { assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); } + @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80918") public void testAddTimeStampMeta() throws IOException { Settings s = Settings.builder() .put(IndexSettings.MODE.getKey(), "time_series") From 7db06c110bff7306899674e764248494f4616837 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Tue, 23 Nov 2021 09:51:09 +0000 Subject: [PATCH 14/88] Fix shadowed vars pt6 (#80899) Part of #19752. Fix more instances where local variable names were shadowing field names.
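The typical shape of these fixes is to rename the shadowing parameter or local variable so it no longer hides the field. A minimal before/after sketch of the pattern, based on the setEnvironment() change in InstallPluginAction below:

    // Before: the parameter shadows the field of the same name, so a bare
    // `env` inside the method silently refers to the parameter.
    void setEnvironment(Environment env) {
        this.env = env;
    }

    // After: the parameter has a distinct name, so `env` can only mean the field.
    void setEnvironment(Environment environment) {
        this.env = environment;
    }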
--- .../plugins/cli/InstallPluginAction.java | 4 +- .../plugins/cli/SyncPluginsAction.java | 4 +- .../plugins/cli/InstallPluginActionTests.java | 49 ++++++++------- .../upgrades/FullClusterRestartIT.java | 29 +++++---- .../packaging/test/PackagingTestCase.java | 6 +- .../packaging/util/Distribution.java | 8 +-- .../packaging/util/docker/DockerRun.java | 14 ++--- .../RetentionLeasesReplicationTests.java | 8 +-- .../cluster/DiskUsageIntegTestCase.java | 18 +++--- .../MockInternalClusterInfoService.java | 20 +++--- .../AbstractCoordinatorTestCase.java | 14 ++--- .../index/engine/EngineTestCase.java | 1 + .../ESIndexLevelReplicationTestCase.java | 63 +++++++++---------- .../script/MockScriptEngine.java | 8 +-- .../elasticsearch/test/BackgroundIndexer.java | 4 +- .../test/ExternalTestCluster.java | 16 ++--- .../test/InternalTestCluster.java | 30 ++++----- .../org/elasticsearch/test/TestCluster.java | 4 +- .../elasticsearch/test/TestSearchContext.java | 38 +++++------ .../test/disruption/NetworkDisruption.java | 30 ++++----- .../test/disruption/SingleNodeDisruption.java | 20 +++--- .../test/rest/ESRestTestCase.java | 41 ++++++------ .../test/rest/FakeRestRequest.java | 16 ++--- .../test/rest/yaml/ObjectPath.java | 26 ++++---- .../yaml/restspec/ClientYamlSuiteRestApi.java | 16 ++--- .../test/rest/yaml/section/DoSection.java | 4 +- .../test/transport/FakeTransport.java | 8 +-- .../test/transport/MockTransport.java | 10 +-- .../analytics/boxplot/InternalBoxplot.java | 28 ++++----- .../multiterms/InternalMultiTerms.java | 21 ++++--- .../MultiTermsAggregationFactory.java | 10 +-- .../multiterms/MultiTermsAggregator.java | 6 +- .../normalize/NormalizePipelineMethods.java | 7 ++- .../rate/AbstractRateAggregator.java | 8 +-- .../xpack/analytics/rate/InternalRate.java | 8 +-- .../stringstats/InternalStringStats.java | 1 + .../TopMetricsAggregationBuilder.java | 2 +- .../xpack/search/AsyncSearchTask.java | 4 +- .../xpack/search/MutableSearchResponse.java | 11 ++-- .../xpack/async/AsyncResultsIndexPlugin.java | 2 +- .../xpack/autoscaling/Autoscaling.java | 21 +++---- .../autoscaling/AutoscalingMetadata.java | 6 +- .../action/PutAutoscalingPolicyAction.java | 6 +- ...ransportDeleteAutoscalingPolicyAction.java | 4 +- .../TransportPutAutoscalingPolicyAction.java | 4 +- .../memory/AutoscalingMemoryInfoService.java | 4 +- .../autoscaling/policy/AutoscalingPolicy.java | 6 +- .../ReactiveStorageDeciderService.java | 6 +- .../AutoscalingMemoryInfoServiceTests.java | 10 +-- .../ReactiveStorageDeciderDecisionTests.java | 15 +++-- .../java/org/elasticsearch/xpack/ccr/Ccr.java | 7 ++- .../ccr/action/AutoFollowCoordinator.java | 26 ++++---- .../xpack/ccr/action/ShardFollowNodeTask.java | 9 +-- .../ccr/action/ShardFollowTasksExecutor.java | 4 +- .../ccr/action/TransportPutFollowAction.java | 4 +- .../xpack/ccr/repository/CcrRepository.java | 18 +++--- .../ShardFollowTaskReplicationTests.java | 12 ++-- .../datastreams/DataStreamsSnapshotsIT.java | 16 ++--- .../logging/DeprecationIndexingAppender.java | 6 +- 59 files changed, 403 insertions(+), 398 deletions(-) diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java index 0d3d31129d78e..75eedf4a6b84c 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginAction.java @@ 
-467,8 +467,8 @@ Path downloadZip(String urlString, Path tmpDir) throws IOException { } // for testing only - void setEnvironment(Environment env) { - this.env = env; + void setEnvironment(Environment environment) { + this.env = environment; } // for testing only diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java index edcc65ee60ede..ec12b3cedd3be 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/SyncPluginsAction.java @@ -113,7 +113,7 @@ public void execute() throws Exception { // @VisibleForTesting PluginChanges getPluginChanges(PluginsConfig pluginsConfig, Optional cachedPluginsConfig) throws PluginSyncException { - final List existingPlugins = getExistingPlugins(this.env); + final List existingPlugins = getExistingPlugins(); final List pluginsThatShouldExist = pluginsConfig.getPlugins(); final List pluginsThatActuallyExist = existingPlugins.stream() @@ -228,7 +228,7 @@ private List getPluginsToUpgrade( }).collect(Collectors.toList()); } - private List getExistingPlugins(Environment env) throws PluginSyncException { + private List getExistingPlugins() throws PluginSyncException { final List plugins = new ArrayList<>(); try { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java index f8c8ef7e680f6..c926a4b986744 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/cli/InstallPluginActionTests.java @@ -291,15 +291,15 @@ void installPlugin(PluginDescriptor plugin, Path home, InstallPluginAction actio } void installPlugins(final List plugins, final Path home, final InstallPluginAction action) throws Exception { - final Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); - action.setEnvironment(env); + final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); + action.setEnvironment(environment); action.execute(plugins); } - void assertPlugin(String name, Path original, Environment env) throws IOException { - assertPluginInternal(name, env.pluginsFile(), original); - assertConfigAndBin(name, original, env); - assertInstallCleaned(env); + void assertPlugin(String name, Path original, Environment environment) throws IOException { + assertPluginInternal(name, environment.pluginsFile(), original); + assertConfigAndBin(name, original, environment); + assertInstallCleaned(environment); } void assertPluginInternal(String name, Path pluginsFile, Path originalPlugin) throws IOException { @@ -331,9 +331,9 @@ void assertPluginInternal(String name, Path pluginsFile, Path originalPlugin) th assertFalse("config was not copied", Files.exists(got.resolve("config"))); } - void assertConfigAndBin(String name, Path original, Environment env) throws IOException { + void assertConfigAndBin(String name, Path original, Environment environment) throws IOException { if (Files.exists(original.resolve("bin"))) { - Path binDir = env.binFile().resolve(name); + Path binDir = environment.binFile().resolve(name); 
assertTrue("bin dir exists", Files.exists(binDir)); assertTrue("bin is a dir", Files.isDirectory(binDir)); try (DirectoryStream stream = Files.newDirectoryStream(binDir)) { @@ -347,7 +347,7 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx } } if (Files.exists(original.resolve("config"))) { - Path configDir = env.configFile().resolve(name); + Path configDir = environment.configFile().resolve(name); assertTrue("config dir exists", Files.exists(configDir)); assertTrue("config is a dir", Files.isDirectory(configDir)); @@ -355,7 +355,7 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx GroupPrincipal group = null; if (isPosix) { - PosixFileAttributes configAttributes = Files.getFileAttributeView(env.configFile(), PosixFileAttributeView.class) + PosixFileAttributes configAttributes = Files.getFileAttributeView(environment.configFile(), PosixFileAttributeView.class) .readAttributes(); user = configAttributes.owner(); group = configAttributes.group(); @@ -383,8 +383,8 @@ void assertConfigAndBin(String name, Path original, Environment env) throws IOEx } } - void assertInstallCleaned(Environment env) throws IOException { - try (DirectoryStream stream = Files.newDirectoryStream(env.pluginsFile())) { + void assertInstallCleaned(Environment environment) throws IOException { + try (DirectoryStream stream = Files.newDirectoryStream(environment.pluginsFile())) { for (Path file : stream) { if (file.getFileName().toString().startsWith(".installing")) { fail("Installation dir still exists, " + file); @@ -598,22 +598,22 @@ public void testBinPermissions() throws Exception { public void testPluginPermissions() throws Exception { assumeTrue("posix filesystem", isPosix); - final Path pluginDir = createPluginDir(temp); - final Path resourcesDir = pluginDir.resolve("resources"); - final Path platformDir = pluginDir.resolve("platform"); + final Path tempPluginDir = createPluginDir(temp); + final Path resourcesDir = tempPluginDir.resolve("resources"); + final Path platformDir = tempPluginDir.resolve("platform"); final Path platformNameDir = platformDir.resolve("linux-x86_64"); final Path platformBinDir = platformNameDir.resolve("bin"); Files.createDirectories(platformBinDir); - Files.createFile(pluginDir.resolve("fake-" + Version.CURRENT.toString() + ".jar")); + Files.createFile(tempPluginDir.resolve("fake-" + Version.CURRENT.toString() + ".jar")); Files.createFile(platformBinDir.resolve("fake_executable")); Files.createDirectory(resourcesDir); Files.createFile(resourcesDir.resolve("resource")); - final PluginDescriptor pluginZip = createPluginZip("fake", pluginDir); + final PluginDescriptor pluginZip = createPluginZip("fake", tempPluginDir); installPlugin(pluginZip); - assertPlugin("fake", pluginDir, env.v2()); + assertPlugin("fake", tempPluginDir, env.v2()); final Path fake = env.v2().pluginsFile().resolve("fake"); final Path resources = fake.resolve("resources"); @@ -729,9 +729,9 @@ public void testZipRelativeOutsideEntryName() throws Exception { } public void testOfficialPluginsHelpSortedAndMissingObviouslyWrongPlugins() throws Exception { - MockTerminal terminal = new MockTerminal(); - new MockInstallPluginCommand().main(new String[] { "--help" }, terminal); - try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) { + MockTerminal mockTerminal = new MockTerminal(); + new MockInstallPluginCommand().main(new String[] { "--help" }, mockTerminal); + try (BufferedReader reader = new BufferedReader(new 
StringReader(mockTerminal.getOutput()))) { String line = reader.readLine(); // first find the beginning of our list of official plugins @@ -1360,7 +1360,8 @@ private String signature(final byte[] bytes, final PGPSecretKey secretKey) { // checks the plugin requires a policy confirmation, and does not install when that is rejected by the user // the plugin is installed after this method completes - private void assertPolicyConfirmation(Tuple env, PluginDescriptor pluginZip, String... warnings) throws Exception { + private void assertPolicyConfirmation(Tuple pathEnvironmentTuple, PluginDescriptor pluginZip, String... warnings) + throws Exception { for (int i = 0; i < warnings.length; ++i) { String warning = warnings[i]; for (int j = 0; j < i; ++j) { @@ -1372,7 +1373,7 @@ private void assertPolicyConfirmation(Tuple env, PluginDescri assertThat(e.getMessage(), containsString("installation aborted by user")); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + try (Stream fileStream = Files.list(pathEnvironmentTuple.v2().pluginsFile())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } @@ -1385,7 +1386,7 @@ private void assertPolicyConfirmation(Tuple env, PluginDescri e = expectThrows(UserException.class, () -> installPlugin(pluginZip)); assertThat(e.getMessage(), containsString("installation aborted by user")); assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); - try (Stream fileStream = Files.list(env.v2().pluginsFile())) { + try (Stream fileStream = Files.list(pathEnvironmentTuple.v2().pluginsFile())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } } diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index ed41c04931faa..745276d3c4145 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -782,7 +782,7 @@ public void testSingleDoc() throws IOException { * Tests that a single empty shard index is correctly recovered. Empty shards are often an edge case. */ public void testEmptyShard() throws IOException { - final String index = "test_empty_shard"; + final String indexName = "test_empty_shard"; if (isRunningAgainstOldCluster()) { Settings.Builder settings = Settings.builder() @@ -794,9 +794,9 @@ public void testEmptyShard() throws IOException { // before timing out .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms") .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster - createIndex(index, settings.build()); + createIndex(indexName, settings.build()); } - ensureGreen(index); + ensureGreen(indexName); } /** @@ -1165,21 +1165,24 @@ public void testClosedIndices() throws Exception { * that the index has started shards. */ @SuppressWarnings("unchecked") - private void assertClosedIndex(final String index, final boolean checkRoutingTable) throws IOException { + private void assertClosedIndex(final String indexName, final boolean checkRoutingTable) throws IOException { final Map state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state"))); - final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." + index, state); + final Map metadata = (Map) XContentMapValues.extractValue("metadata.indices." 
+ indexName, state); assertThat(metadata, notNullValue()); assertThat(metadata.get("state"), equalTo("close")); - final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." + index, state); + final Map blocks = (Map) XContentMapValues.extractValue("blocks.indices." + indexName, state); assertThat(blocks, notNullValue()); assertThat(blocks.containsKey(String.valueOf(MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true)); final Map settings = (Map) XContentMapValues.extractValue("settings", metadata); assertThat(settings, notNullValue()); - final Map routingTable = (Map) XContentMapValues.extractValue("routing_table.indices." + index, state); + final Map routingTable = (Map) XContentMapValues.extractValue( + "routing_table.indices." + indexName, + state + ); if (checkRoutingTable) { assertThat(routingTable, notNullValue()); assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true)); @@ -1198,7 +1201,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab for (Map shard : shards) { assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i)); assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED")); - assertThat(XContentMapValues.extractValue("index", shard), equalTo(index)); + assertThat(XContentMapValues.extractValue("index", shard), equalTo(indexName)); } } } else { @@ -1353,12 +1356,12 @@ private String loadInfoDocument(String id) throws IOException { return m.group(1); } - private List dataNodes(String index, RestClient client) throws IOException { - Request request = new Request("GET", index + "/_stats"); + private List dataNodes(String indexName, RestClient client) throws IOException { + Request request = new Request("GET", indexName + "/_stats"); request.addParameter("level", "shards"); Response response = client.performRequest(request); List nodes = new ArrayList<>(); - List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); + List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + indexName + ".shards.0"); for (Object shard : shardStats) { final String nodeId = ObjectPath.evaluate(shard, "routing.node"); nodes.add(nodeId); @@ -1370,8 +1373,8 @@ private List dataNodes(String index, RestClient client) throws IOExcepti * Wait for an index to have green health, waiting longer than * {@link ESRestTestCase#ensureGreen}. 
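     * <p>A minimal usage sketch, reusing an index created earlier in this test
     * class: {@code ensureGreenLongWait("test_empty_shard")} polls
     * {@code GET /_cluster/health/test_empty_shard} with a two-minute timeout,
     * waiting for green status and no relocating shards.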
*/ - protected void ensureGreenLongWait(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); + protected void ensureGreenLongWait(String indexName) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + indexName); request.addParameter("timeout", "2m"); request.addParameter("wait_for_status", "green"); request.addParameter("wait_for_no_relocating_shards", "true"); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index e2f2344e3f907..a38935caf90af 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -126,12 +126,12 @@ public abstract class PackagingTestCase extends Assert { // the java installation already installed on the system protected static final String systemJavaHome; static { - Shell sh = new Shell(); + Shell initShell = new Shell(); if (Platforms.WINDOWS) { - systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + systemJavaHome = initShell.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); } else { assert Platforms.LINUX || Platforms.DARWIN; - systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + systemJavaHome = initShell.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); } } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 48c978cad62a0..f3fd1becab900 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -43,12 +43,8 @@ public Distribution(Path path) { this.platform = filename.contains("windows") ? Platform.WINDOWS : Platform.LINUX; this.hasJdk = filename.contains("no-jdk") == false; - String version = filename.split("-", 3)[1]; - this.baseVersion = version; - if (filename.contains("-SNAPSHOT")) { - version += "-SNAPSHOT"; - } - this.version = version; + this.baseVersion = filename.split("-", 3)[1]; + this.version = filename.contains("-SNAPSHOT") ? this.baseVersion + "-SNAPSHOT" : this.baseVersion; } public boolean isArchive() { diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java index fdf4201c96f12..30401d978ba2a 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/docker/DockerRun.java @@ -70,18 +70,18 @@ public DockerRun volume(Path from, Path to) { /** * Sets the UID that the container is run with, and the GID too if specified. 
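     * <p>For example, {@code uid(1000, 1000)} configures the container to run
     * as UID and GID 1000, {@code uid(1000, null)} overrides only the UID, and
     * {@code uid(null, 1000)} throws an {@link IllegalArgumentException}, since
     * a GID override requires a UID override.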
* - * @param uid the UID to use, or {@code null} to use the image default - * @param gid the GID to use, or {@code null} to use the image default + * @param uidToUse the UID to use, or {@code null} to use the image default + * @param gidToUse the GID to use, or {@code null} to use the image default * @return the current builder */ - public DockerRun uid(Integer uid, Integer gid) { - if (uid == null) { - if (gid != null) { + public DockerRun uid(Integer uidToUse, Integer gidToUse) { + if (uidToUse == null) { + if (gidToUse != null) { throw new IllegalArgumentException("Cannot override GID without also overriding UID"); } } - this.uid = uid; - this.gid = gid; + this.uid = uidToUse; + this.gid = gidToUse; return this; } diff --git a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java index 266b5b1b2608f..3d22ea609d811 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RetentionLeasesReplicationTests.java @@ -75,8 +75,8 @@ public void testOutOfOrderRetentionLeasesRequests() throws Exception { IndexMetadata indexMetadata = buildIndexMetadata(numberOfReplicas, settings, indexMapping); try (ReplicationGroup group = new ReplicationGroup(indexMetadata) { @Override - protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { - listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(shardId, leases))); + protected void syncRetentionLeases(ShardId id, RetentionLeases leases, ActionListener listener) { + listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(id, leases))); } }) { group.startAll(); @@ -102,8 +102,8 @@ public void testSyncRetentionLeasesWithPrimaryPromotion() throws Exception { IndexMetadata indexMetadata = buildIndexMetadata(numberOfReplicas, settings, indexMapping); try (ReplicationGroup group = new ReplicationGroup(indexMetadata) { @Override - protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { - listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(shardId, leases))); + protected void syncRetentionLeases(ShardId id, RetentionLeases leases, ActionListener listener) { + listener.onResponse(new SyncRetentionLeasesResponse(new RetentionLeaseSyncAction.Request(id, leases))); } }) { group.startAll(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java index 99de3ca43dc28..29930f8bdd996 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/DiskUsageIntegTestCase.java @@ -114,11 +114,11 @@ public String name() { @Override public long getTotalSpace() throws IOException { - final long totalSpace = this.totalSpace; - if (totalSpace == -1) { + final long totalSpaceCopy = this.totalSpace; + if (totalSpaceCopy == -1) { return super.getTotalSpace(); } else { - return totalSpace; + return totalSpaceCopy; } } @@ -129,21 +129,21 @@ public void setTotalSpace(long totalSpace) { @Override public long getUsableSpace() throws IOException { - final long totalSpace = this.totalSpace; - if (totalSpace == -1) { + final long 
totalSpaceCopy = this.totalSpace; + if (totalSpaceCopy == -1) { return super.getUsableSpace(); } else { - return Math.max(0L, totalSpace - getTotalFileSize(path)); + return Math.max(0L, totalSpaceCopy - getTotalFileSize(path)); } } @Override public long getUnallocatedSpace() throws IOException { - final long totalSpace = this.totalSpace; - if (totalSpace == -1) { + final long totalSpaceCopy = this.totalSpace; + if (totalSpaceCopy == -1) { return super.getUnallocatedSpace(); } else { - return Math.max(0L, totalSpace - getTotalFileSize(path)); + return Math.max(0L, totalSpaceCopy - getTotalFileSize(path)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index f346f012632e1..1d50a7ddfcfb3 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -40,13 +40,13 @@ public MockInternalClusterInfoService(Settings settings, ClusterService clusterS super(settings, clusterService, threadPool, client); } - public void setDiskUsageFunctionAndRefresh(BiFunction diskUsageFunction) { - this.diskUsageFunction = diskUsageFunction; + public void setDiskUsageFunctionAndRefresh(BiFunction diskUsageFn) { + this.diskUsageFunction = diskUsageFn; ClusterInfoServiceUtils.refresh(this); } - public void setShardSizeFunctionAndRefresh(Function shardSizeFunction) { - this.shardSizeFunction = shardSizeFunction; + public void setShardSizeFunctionAndRefresh(Function shardSizeFn) { + this.shardSizeFunction = shardSizeFn; ClusterInfoServiceUtils.refresh(this); } @@ -58,8 +58,8 @@ public ClusterInfo getClusterInfo() { @Override List adjustNodesStats(List nodesStats) { - final BiFunction diskUsageFunction = this.diskUsageFunction; - if (diskUsageFunction == null) { + final BiFunction diskUsageFunctionCopy = this.diskUsageFunction; + if (diskUsageFunctionCopy == null) { return nodesStats; } @@ -78,7 +78,7 @@ List adjustNodesStats(List nodesStats) { oldFsInfo.getTimestamp(), oldFsInfo.getIoStats(), StreamSupport.stream(oldFsInfo.spliterator(), false) - .map(fsInfoPath -> diskUsageFunction.apply(discoveryNode, fsInfoPath)) + .map(fsInfoPath -> diskUsageFunctionCopy.apply(discoveryNode, fsInfoPath)) .toArray(FsInfo.Path[]::new) ), nodeStats.getTransport(), @@ -108,12 +108,12 @@ class SizeFakingClusterInfo extends ClusterInfo { @Override public Long getShardSize(ShardRouting shardRouting) { - final Function shardSizeFunction = MockInternalClusterInfoService.this.shardSizeFunction; - if (shardSizeFunction == null) { + final Function shardSizeFunctionCopy = MockInternalClusterInfoService.this.shardSizeFunction; + if (shardSizeFunctionCopy == null) { return super.getShardSize(shardRouting); } - return shardSizeFunction.apply(shardRouting); + return shardSizeFunctionCopy.apply(shardRouting); } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index cc41ca120ab87..150670bd29739 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -811,14 +811,14 @@ ClusterNode getAnyNode() { return getAnyNodeExcept(); } - 
ClusterNode getAnyNodeExcept(ClusterNode... clusterNodes) { - List filteredNodes = getAllNodesExcept(clusterNodes); + ClusterNode getAnyNodeExcept(ClusterNode... clusterNodesToExclude) { + List filteredNodes = getAllNodesExcept(clusterNodesToExclude); assert filteredNodes.isEmpty() == false; return randomFrom(filteredNodes); } - List getAllNodesExcept(ClusterNode... clusterNodes) { - Set forbiddenIds = Arrays.stream(clusterNodes).map(ClusterNode::getId).collect(Collectors.toSet()); + List getAllNodesExcept(ClusterNode... clusterNodesToExclude) { + Set forbiddenIds = Arrays.stream(clusterNodesToExclude).map(ClusterNode::getId).collect(Collectors.toSet()); return this.clusterNodes.stream().filter(n -> forbiddenIds.contains(n.getId()) == false).collect(Collectors.toList()); } @@ -1258,7 +1258,7 @@ ClusterNode restartedNode() { ClusterNode restartedNode( Function adaptGlobalMetadata, Function adaptCurrentTerm, - Settings nodeSettings + Settings settings ) { final Set allExceptVotingOnlyRole = DiscoveryNodeRole.roles() .stream() @@ -1273,7 +1273,7 @@ ClusterNode restartedNode( address.getAddress(), address, Collections.emptyMap(), - localNode.isMasterNode() && DiscoveryNode.isMasterNode(nodeSettings) ? allExceptVotingOnlyRole : emptySet(), + localNode.isMasterNode() && DiscoveryNode.isMasterNode(settings) ? allExceptVotingOnlyRole : emptySet(), Version.CURRENT ); try { @@ -1281,7 +1281,7 @@ ClusterNode restartedNode( nodeIndex, newLocalNode, node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetadata, adaptCurrentTerm), - nodeSettings, + settings, nodeHealthService ); } finally { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 6cad713d58392..218ce75dd3ad8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -141,6 +141,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; +@SuppressWarnings("HiddenField") public abstract class EngineTestCase extends ESTestCase { protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 0); diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 41b2a2eed8244..2ac75c2c77b81 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -185,13 +185,13 @@ protected class ReplicationGroup implements AutoCloseable, Iterable ); private final RetentionLeaseSyncer retentionLeaseSyncer = new RetentionLeaseSyncer( - (shardId, primaryAllocationId, primaryTerm, retentionLeases, listener) -> syncRetentionLeases( - shardId, + (_shardId, primaryAllocationId, primaryTerm, retentionLeases, listener) -> syncRetentionLeases( + _shardId, retentionLeases, listener ), - (shardId, primaryAllocationId, primaryTerm, retentionLeases) -> syncRetentionLeases( - shardId, + (_shardId, primaryAllocationId, primaryTerm, retentionLeases) -> syncRetentionLeases( + _shardId, retentionLeases, ActionListener.wrap(r -> {}, e -> { throw new AssertionError("failed to background sync retention lease", e); }) ) @@ -208,13 
+208,13 @@ protected ReplicationGroup(final IndexMetadata indexMetadata) throws IOException } } - private ShardRouting createShardRouting(String nodeId, boolean primary) { + private ShardRouting createShardRouting(String nodeId, boolean isPrimary) { return TestShardRouting.newShardRouting( shardId, nodeId, - primary, + isPrimary, ShardRoutingState.INITIALIZING, - primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + isPrimary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE ); } @@ -337,10 +337,10 @@ assert shardRoutings().stream().anyMatch(shardRouting -> shardRouting.isSameAllo updateAllocationIDsOnPrimary(); } - protected synchronized void recoverPrimary(IndexShard primary) { - final DiscoveryNode pNode = getDiscoveryNode(primary.routingEntry().currentNodeId()); - primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(), pNode, null)); - recoverFromStore(primary); + protected synchronized void recoverPrimary(IndexShard primaryShard) { + final DiscoveryNode pNode = getDiscoveryNode(primaryShard.routingEntry().currentNodeId()); + primaryShard.markAsRecovering("store", new RecoveryState(primaryShard.routingEntry(), pNode, null)); + recoverFromStore(primaryShard); } public synchronized IndexShard addReplicaWithExistingPath(final ShardPath shardPath, final String nodeId) throws IOException { @@ -401,7 +401,7 @@ public void onFailure(Exception e) { public synchronized void promoteReplicaToPrimary( IndexShard replica, - BiConsumer> primaryReplicaSyncer + BiConsumer> primaryReplicaSyncerArg ) throws IOException { final long newTerm = indexMetadata.primaryTerm(shardId.id()) + 1; IndexMetadata.Builder newMetadata = IndexMetadata.builder(indexMetadata).primaryTerm(shardId.id(), newTerm); @@ -416,7 +416,7 @@ public synchronized void promoteReplicaToPrimary( primary.updateShardState( primaryRouting, newTerm, - primaryReplicaSyncer, + primaryReplicaSyncerArg, currentClusterStateVersion.incrementAndGet(), activeIds(), routingTable @@ -584,12 +584,9 @@ private ReplicationTargets getReplicationTargets() { return replicationTargets; } - protected void syncRetentionLeases(ShardId shardId, RetentionLeases leases, ActionListener listener) { - new SyncRetentionLeases( - new RetentionLeaseSyncAction.Request(shardId, leases), - this, - listener.map(r -> new ReplicationResponse()) - ).execute(); + protected void syncRetentionLeases(ShardId id, RetentionLeases leases, ActionListener listener) { + new SyncRetentionLeases(new RetentionLeaseSyncAction.Request(id, leases), this, listener.map(r -> new ReplicationResponse())) + .execute(); } public synchronized RetentionLease addRetentionLease( @@ -717,8 +714,8 @@ public void failShard(String message, Exception exception) { } @Override - public void perform(Request request, ActionListener listener) { - performOnPrimary(getPrimaryShard(), request, listener); + public void perform(Request replicationRequest, ActionListener primaryResultListener) { + performOnPrimary(getPrimaryShard(), replicationRequest, primaryResultListener); } @Override @@ -767,20 +764,20 @@ class ReplicasRef implements ReplicationOperation.Replicas { @Override public void performOn( final ShardRouting replicaRouting, - final ReplicaRequest request, + final ReplicaRequest replicaRequest, final long primaryTerm, final long globalCheckpoint, final long maxSeqNoOfUpdatesOrDeletes, - final ActionListener listener + final ActionListener replicaResponseListener ) { IndexShard replica = 
replicationTargets.findReplicaShard(replicaRouting); replica.acquireReplicaOperationPermit( getPrimaryShard().getPendingPrimaryTerm(), globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, - listener.delegateFailure((delegatedListener, releasable) -> { + replicaResponseListener.delegateFailure((delegatedListener, releasable) -> { try { - performOnReplica(request, replica); + performOnReplica(replicaRequest, replica); releasable.close(); delegatedListener.onResponse( new ReplicaResponse(replica.getLocalCheckpoint(), replica.getLastKnownGlobalCheckpoint()) @@ -791,7 +788,7 @@ public void performOn( } }), ThreadPool.Names.WRITE, - request + replicaRequest ); } @@ -801,19 +798,19 @@ public void failShardIfNeeded( long primaryTerm, String message, Exception exception, - ActionListener listener + ActionListener actionListener ) { throw new UnsupportedOperationException("failing shard " + replica + " isn't supported. failure: " + message, exception); } @Override public void markShardCopyAsStaleIfNeeded( - ShardId shardId, + ShardId id, String allocationId, long primaryTerm, - ActionListener listener + ActionListener actionListener ) { - throw new UnsupportedOperationException("can't mark " + shardId + ", aid [" + allocationId + "] as stale"); + throw new UnsupportedOperationException("can't mark " + id + ", aid [" + allocationId + "] as stale"); } } @@ -837,8 +834,8 @@ public void setShardInfo(ReplicationResponse.ShardInfo shardInfo) { } @Override - public void runPostReplicationActions(ActionListener listener) { - listener.onResponse(null); + public void runPostReplicationActions(ActionListener actionListener) { + actionListener.onResponse(null); } } @@ -884,7 +881,7 @@ private void executeShardBulkOnPrimary( final PlainActionFuture permitAcquiredFuture = new PlainActionFuture<>(); primary.acquirePrimaryOperationPermit(permitAcquiredFuture, ThreadPool.Names.SAME, request); try (Releasable ignored = permitAcquiredFuture.actionGet()) { - MappingUpdatePerformer noopMappingUpdater = (update, shardId, listener1) -> {}; + MappingUpdatePerformer noopMappingUpdater = (_update, _shardId, _listener1) -> {}; TransportShardBulkAction.performOnPrimary( request, primary, diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index b3a83d4f9bcc8..8438cf68179bc 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -68,12 +68,12 @@ public MockScriptEngine( Map, ContextCompiler> contexts ) { - Map scripts = new HashMap<>(deterministicScripts.size() + nonDeterministicScripts.size()); - deterministicScripts.forEach((key, value) -> scripts.put(key, MockDeterministicScript.asDeterministic(value))); - nonDeterministicScripts.forEach((key, value) -> scripts.put(key, MockDeterministicScript.asNonDeterministic(value))); + Map scriptMap = new HashMap<>(deterministicScripts.size() + nonDeterministicScripts.size()); + deterministicScripts.forEach((key, value) -> scriptMap.put(key, MockDeterministicScript.asDeterministic(value))); + nonDeterministicScripts.forEach((key, value) -> scriptMap.put(key, MockDeterministicScript.asNonDeterministic(value))); this.type = type; - this.scripts = Collections.unmodifiableMap(scripts); + this.scripts = Collections.unmodifiableMap(scriptMap); this.contexts = Collections.unmodifiableMap(contexts); } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java index fa0785954d186..0b60d832a9228 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java +++ b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java @@ -248,8 +248,8 @@ private XContentBuilder generateSource(long id, Random random) throws IOExceptio private volatile TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; - public void setRequestTimeout(TimeValue timeout) { - this.timeout = timeout; + public void setRequestTimeout(TimeValue requestTimeout) { + this.timeout = requestTimeout; } private volatile boolean ignoreIndexingFailures; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index d2ec6bf4ee0d9..a729664116729 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -105,11 +105,11 @@ public ExternalTestCluster( pluginClasses = new ArrayList<>(pluginClasses); pluginClasses.add(MockHttpTransport.TestPlugin.class); Settings clientSettings = clientSettingsBuilder.build(); - MockNode node = new MockNode(clientSettings, pluginClasses); - Client client = clientWrapper.apply(node.client()); + MockNode mockNode = new MockNode(clientSettings, pluginClasses); + Client wrappedClient = clientWrapper.apply(mockNode.client()); try { - node.start(); - NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); + mockNode.start(); + NodesInfoResponse nodeInfos = wrappedClient.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); httpAddresses = new InetSocketAddress[nodeInfos.getNodes().size()]; int dataNodes = 0; int masterAndDataNodes = 0; @@ -125,20 +125,20 @@ public ExternalTestCluster( } this.numDataNodes = dataNodes; this.numMasterAndDataNodes = masterAndDataNodes; - this.client = client; - this.node = node; + this.client = wrappedClient; + this.node = mockNode; logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size()); } catch (NodeValidationException e) { try { - IOUtils.close(client, node); + IOUtils.close(wrappedClient, mockNode); } catch (IOException e1) { e.addSuppressed(e1); } throw new ElasticsearchException(e); } catch (Exception e) { try { - IOUtils.close(client, node); + IOUtils.close(wrappedClient, mockNode); } catch (IOException e1) { e.addSuppressed(e1); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index c444a31e990a4..5ad0c9489354e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -901,7 +901,7 @@ private final class NodeAndClient implements Closeable { this.name = name; this.originalNodeSettings = originalNodeSettings; this.nodeAndClientId = nodeAndClientId; - markNodeDataDirsAsNotEligibleForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(); } Node node() { @@ -1018,7 +1018,7 @@ public void afterStart() { } }); closed.set(false); - markNodeDataDirsAsNotEligibleForWipe(node); + markNodeDataDirsAsNotEligibleForWipe(); } @Override @@ -1028,7 
+1028,7 @@ public void close() throws IOException { resetClient(); } finally { closed.set(true); - markNodeDataDirsAsPendingForWipe(node); + markNodeDataDirsAsPendingForWipe(); node.close(); try { if (node.awaitClose(10, TimeUnit.SECONDS) == false) { @@ -1040,17 +1040,17 @@ public void close() throws IOException { } } - private void markNodeDataDirsAsPendingForWipe(Node node) { + private void markNodeDataDirsAsPendingForWipe() { assert Thread.holdsLock(InternalTestCluster.this); - NodeEnvironment nodeEnv = node.getNodeEnvironment(); + NodeEnvironment nodeEnv = this.node.getNodeEnvironment(); if (nodeEnv.hasNodeFile()) { dataDirToClean.addAll(Arrays.asList(nodeEnv.nodeDataPaths())); } } - private void markNodeDataDirsAsNotEligibleForWipe(Node node) { + private void markNodeDataDirsAsNotEligibleForWipe() { assert Thread.holdsLock(InternalTestCluster.this); - NodeEnvironment nodeEnv = node.getNodeEnvironment(); + NodeEnvironment nodeEnv = this.node.getNodeEnvironment(); if (nodeEnv.hasNodeFile()) { dataDirToClean.removeAll(Arrays.asList(nodeEnv.nodeDataPaths())); } @@ -1984,14 +1984,14 @@ public synchronized Set nodesInclude(String index) { if (clusterService().state().routingTable().hasIndex(index)) { List allShards = clusterService().state().routingTable().allShards(index); DiscoveryNodes discoveryNodes = clusterService().state().getNodes(); - Set nodes = new HashSet<>(); + Set nodeNames = new HashSet<>(); for (ShardRouting shardRouting : allShards) { if (shardRouting.assignedToNode()) { DiscoveryNode discoveryNode = discoveryNodes.get(shardRouting.currentNodeId()); - nodes.add(discoveryNode.getName()); + nodeNames.add(discoveryNode.getName()); } } - return nodes; + return nodeNames; } return Collections.emptySet(); } @@ -2088,7 +2088,7 @@ public List startNodes(int numOfNodes, Settings settings) { */ public synchronized List startNodes(Settings... extraSettings) { final int newMasterCount = Math.toIntExact(Stream.of(extraSettings).filter(DiscoveryNode::isMasterNode).count()); - final List nodes = new ArrayList<>(); + final List nodeList = new ArrayList<>(); final int prevMasterCount = getMasterNodesCount(); int autoBootstrapMasterNodeIndex = autoManageMasterNodes && prevMasterCount == 0 @@ -2127,15 +2127,15 @@ public synchronized List startNodes(Settings... extraSettings) { firstNodeId + i, builder.put(nodeSettings).build(), false, - () -> rebuildUnicastHostFiles(nodes) + () -> rebuildUnicastHostFiles(nodeList) ); - nodes.add(nodeAndClient); + nodeList.add(nodeAndClient); } - startAndPublishNodesAndClients(nodes); + startAndPublishNodesAndClients(nodeList); if (autoManageMasterNodes) { validateClusterFormed(); } - return nodes.stream().map(NodeAndClient::getName).collect(Collectors.toList()); + return nodeList.stream().map(NodeAndClient::getName).collect(Collectors.toList()); } public List startMasterOnlyNodes(int numNodes) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 3d551d3dfe064..ab6fd837c6b82 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -53,8 +53,8 @@ public long seed() { /** * This method should be executed before each test to reset the cluster to its initial state. 
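     * <p>Implementations derive their own seed from the supplied source, as in
     * {@code this.random = new Random(randomGenerator.nextLong())} below, so
     * that a test run can be reproduced from the suite's seed.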
*/ - public void beforeTest(Random random) throws IOException, InterruptedException { - this.random = new Random(random.nextLong()); + public void beforeTest(Random randomGenerator) throws IOException, InterruptedException { + this.random = new Random(randomGenerator.nextLong()); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 90c7b8a23b9ad..42f073a8a079b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -121,7 +121,7 @@ public void setSearcher(ContextIndexSearcher searcher) { public void preProcess() {} @Override - public Query buildFilteredQuery(Query query) { + public Query buildFilteredQuery(Query q) { return null; } @@ -166,8 +166,8 @@ public SearchContextAggregations aggregations() { } @Override - public SearchContext aggregations(SearchContextAggregations aggregations) { - this.aggregations = aggregations; + public SearchContext aggregations(SearchContextAggregations searchContextAggregations) { + this.aggregations = searchContextAggregations; return this; } @@ -302,8 +302,8 @@ public Float minimumScore() { } @Override - public SearchContext sort(SortAndFormats sort) { - this.sort = sort; + public SearchContext sort(SortAndFormats sortAndFormats) { + this.sort = sortAndFormats; return this; } @@ -313,8 +313,8 @@ public SortAndFormats sort() { } @Override - public SearchContext trackScores(boolean trackScores) { - this.trackScores = trackScores; + public SearchContext trackScores(boolean shouldTrackScores) { + this.trackScores = shouldTrackScores; return this; } @@ -324,8 +324,8 @@ public boolean trackScores() { } @Override - public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpTo) { - this.trackTotalHitsUpTo = trackTotalHitsUpTo; + public SearchContext trackTotalHitsUpTo(int trackTotalHitsUpToValue) { + this.trackTotalHitsUpTo = trackTotalHitsUpToValue; return this; } @@ -335,8 +335,8 @@ public int trackTotalHitsUpTo() { } @Override - public SearchContext searchAfter(FieldDoc searchAfter) { - this.searchAfter = searchAfter; + public SearchContext searchAfter(FieldDoc searchAfterDoc) { + this.searchAfter = searchAfterDoc; return this; } @@ -356,8 +356,8 @@ public CollapseContext collapse() { } @Override - public SearchContext parsedPostFilter(ParsedQuery postFilter) { - this.postFilter = postFilter; + public SearchContext parsedPostFilter(ParsedQuery postFilterQuery) { + this.postFilter = postFilterQuery; return this; } @@ -367,9 +367,9 @@ public ParsedQuery parsedPostFilter() { } @Override - public SearchContext parsedQuery(ParsedQuery query) { - this.originalQuery = query; - this.query = query.query(); + public SearchContext parsedQuery(ParsedQuery parsedQuery) { + this.originalQuery = parsedQuery; + this.query = parsedQuery.query(); return this; } @@ -389,8 +389,8 @@ public int from() { } @Override - public SearchContext from(int from) { - this.from = from; + public SearchContext from(int fromValue) { + this.from = fromValue; return this; } @@ -404,7 +404,7 @@ public void setSize(int size) { } @Override - public SearchContext size(int size) { + public SearchContext size(int sizeValue) { return null; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java index 77811ce5f59fa..da04419d62bc0 100644 
--- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisruption.java @@ -59,28 +59,28 @@ public NetworkLinkDisruptionType getNetworkLinkDisruptionType() { } @Override - public void applyToCluster(InternalTestCluster cluster) { - this.cluster = cluster; + public void applyToCluster(InternalTestCluster testCluster) { + this.cluster = testCluster; } @Override - public void removeFromCluster(InternalTestCluster cluster) { + public void removeFromCluster(InternalTestCluster testCluster) { stopDisrupting(); } @Override - public void removeAndEnsureHealthy(InternalTestCluster cluster) { - removeFromCluster(cluster); - ensureHealthy(cluster); + public void removeAndEnsureHealthy(InternalTestCluster testCluster) { + removeFromCluster(testCluster); + ensureHealthy(testCluster); } /** * ensures the cluster is healthy after the disruption */ - public void ensureHealthy(InternalTestCluster cluster) { + public void ensureHealthy(InternalTestCluster testCluster) { assert activeDisruption == false; - ensureNodeCount(cluster); - ensureFullyConnectedCluster(cluster); + ensureNodeCount(testCluster); + ensureFullyConnectedCluster(testCluster); } /** @@ -105,20 +105,20 @@ public static void ensureFullyConnectedCluster(InternalTestCluster cluster) { } } - protected void ensureNodeCount(InternalTestCluster cluster) { - cluster.validateClusterFormed(); + protected void ensureNodeCount(InternalTestCluster testCluster) { + testCluster.validateClusterFormed(); } @Override - public synchronized void applyToNode(String node, InternalTestCluster cluster) { + public synchronized void applyToNode(String node, InternalTestCluster testCluster) { } @Override - public synchronized void removeFromNode(String node1, InternalTestCluster cluster) { + public synchronized void removeFromNode(String node1, InternalTestCluster testCluster) { logger.info("stop disrupting node (disruption type: {}, disrupted links: {})", networkLinkDisruptionType, disruptedLinks); - applyToNodes(new String[] { node1 }, cluster.getNodeNames(), networkLinkDisruptionType::removeDisruption); - applyToNodes(cluster.getNodeNames(), new String[] { node1 }, networkLinkDisruptionType::removeDisruption); + applyToNodes(new String[] { node1 }, testCluster.getNodeNames(), networkLinkDisruptionType::removeDisruption); + applyToNodes(testCluster.getNodeNames(), new String[] { node1 }, networkLinkDisruptionType::removeDisruption); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java index 8aa73f7871435..a70afedb6f221 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java @@ -28,28 +28,28 @@ public SingleNodeDisruption(Random random) { } @Override - public void applyToCluster(InternalTestCluster cluster) { - this.cluster = cluster; + public void applyToCluster(InternalTestCluster testCluster) { + this.cluster = testCluster; if (disruptedNode == null) { - String[] nodes = cluster.getNodeNames(); + String[] nodes = testCluster.getNodeNames(); disruptedNode = nodes[random.nextInt(nodes.length)]; } } @Override - public void removeFromCluster(InternalTestCluster cluster) { + public void removeFromCluster(InternalTestCluster testCluster) { if (disruptedNode != null) { - 
removeFromNode(disruptedNode, cluster); + removeFromNode(disruptedNode, testCluster); } } @Override - public synchronized void applyToNode(String node, InternalTestCluster cluster) { + public synchronized void applyToNode(String node, InternalTestCluster testCluster) { } @Override - public synchronized void removeFromNode(String node, InternalTestCluster cluster) { + public synchronized void removeFromNode(String node, InternalTestCluster testCluster) { if (disruptedNode == null) { return; } @@ -65,14 +65,14 @@ public synchronized void testClusterClosed() { disruptedNode = null; } - protected void ensureNodeCount(InternalTestCluster cluster) { + protected void ensureNodeCount(InternalTestCluster testCluster) { assertFalse( "cluster failed to form after disruption was healed", - cluster.client() + testCluster.client() .admin() .cluster() .prepareHealth() - .setWaitForNodes(String.valueOf(cluster.size())) + .setWaitForNodes(String.valueOf(testCluster.size())) .setWaitForNoRelocatingShards(true) .get() .isTimedOut() diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index ed44973fb3656..b462137b133c0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -294,10 +294,10 @@ public void current(String... requiredWarnings) { /** * Adds to the set of warnings that are permissible (but not required) when running * in mixed-version clusters or those that differ in version from the test client. - * @param allowedWarnings optional warnings that will be ignored if received + * @param allowedWarningsToAdd optional warnings that will be ignored if received */ - public void compatible(String... allowedWarnings) { - this.allowedWarnings.addAll(Arrays.asList(allowedWarnings)); + public void compatible(String... allowedWarningsToAdd) { + this.allowedWarnings.addAll(Arrays.asList(allowedWarningsToAdd)); } @Override @@ -422,11 +422,11 @@ protected static RestClient adminClient() { * Wait for outstanding tasks to complete. The specified admin client is used to check the outstanding tasks and this is done using * {@link ESTestCase#assertBusy(CheckedRunnable)} to give a chance to any outstanding tasks to complete. * - * @param adminClient the admin client + * @param restClient the admin client * @throws Exception if an exception is thrown while checking the outstanding tasks */ - public static void waitForPendingTasks(final RestClient adminClient) throws Exception { - waitForPendingTasks(adminClient, taskName -> false); + public static void waitForPendingTasks(final RestClient restClient) throws Exception { + waitForPendingTasks(restClient, taskName -> false); } /** @@ -434,16 +434,16 @@ public static void waitForPendingTasks(final RestClient adminClient) throws Exce * {@link ESTestCase#assertBusy(CheckedRunnable)} to give a chance to any outstanding tasks to complete. The specified filter is used * to filter out outstanding tasks that are expected to be there. 
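     * <p>A usage sketch with a hypothetical task name: a test that expects one
     * task to remain running could call
     * {@code waitForPendingTasks(adminClient(), task -> task.startsWith("expected/task"))},
     * ignoring matching tasks while waiting for all others to drain.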
* - * @param adminClient the admin client + * @param restClient the admin client * @param taskFilter predicate used to filter tasks that are expected to be there * @throws Exception if an exception is thrown while checking the outstanding tasks */ - public static void waitForPendingTasks(final RestClient adminClient, final Predicate taskFilter) throws Exception { + public static void waitForPendingTasks(final RestClient restClient, final Predicate taskFilter) throws Exception { assertBusy(() -> { try { final Request request = new Request("GET", "/_cat/tasks"); request.addParameter("detailed", "true"); - final Response response = adminClient.performRequest(request); + final Response response = restClient.performRequest(request); /* * Check to see if there are outstanding tasks; we exclude the list task itself, and any expected outstanding tasks using * the specified task filter. @@ -1430,15 +1430,15 @@ public static void ensureHealth(String index, Consumer requestConsumer) ensureHealth(client(), index, requestConsumer); } - protected static void ensureHealth(RestClient client, String index, Consumer requestConsumer) throws IOException { + protected static void ensureHealth(RestClient restClient, String index, Consumer requestConsumer) throws IOException { Request request = new Request("GET", "/_cluster/health" + (index.isBlank() ? "" : "/" + index)); requestConsumer.accept(request); try { - client.performRequest(request); + restClient.performRequest(request); } catch (ResponseException e) { if (e.getResponse().getStatusLine().getStatusCode() == HttpStatus.SC_REQUEST_TIMEOUT) { try { - final Response clusterStateResponse = client.performRequest(new Request("GET", "/_cluster/state?pretty")); + final Response clusterStateResponse = restClient.performRequest(new Request("GET", "/_cluster/state?pretty")); fail( "timed out waiting for green state for index [" + index @@ -1496,9 +1496,9 @@ protected static void deleteIndex(String name) throws IOException { deleteIndex(client(), name); } - protected static void deleteIndex(RestClient client, String name) throws IOException { + protected static void deleteIndex(RestClient restClient, String name) throws IOException { Request request = new Request("DELETE", "/" + name); - client.performRequest(request); + restClient.performRequest(request); } protected static void updateIndexSettings(String index, Settings.Builder settings) throws IOException { @@ -1613,13 +1613,13 @@ protected static void registerRepository(String repository, String type, boolean registerRepository(client(), repository, type, verify, settings); } - protected static void registerRepository(RestClient client, String repository, String type, boolean verify, Settings settings) + protected static void registerRepository(RestClient restClient, String repository, String type, boolean verify, Settings settings) throws IOException { final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository); request.addParameter("verify", Boolean.toString(verify)); request.setJsonEntity(Strings.toString(new PutRepositoryRequest(repository).type(type).settings(settings))); - final Response response = client.performRequest(request); + final Response response = restClient.performRequest(request); assertAcked("Failed to create repository [" + repository + "] of type [" + type + "]: " + response, response); } @@ -1627,12 +1627,12 @@ protected static void createSnapshot(String repository, String snapshot, boolean createSnapshot(client(), repository, snapshot, waitForCompletion); } - 
protected static void createSnapshot(RestClient client, String repository, String snapshot, boolean waitForCompletion) + protected static void createSnapshot(RestClient restClient, String repository, String snapshot, boolean waitForCompletion) throws IOException { final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot); request.addParameter("wait_for_completion", Boolean.toString(waitForCompletion)); - final Response response = client.performRequest(request); + final Response response = restClient.performRequest(request); assertThat( "Failed to create snapshot [" + snapshot + "] in repository [" + repository + "]: " + response, response.getStatusLine().getStatusCode(), @@ -1656,12 +1656,13 @@ protected static void deleteSnapshot(String repository, String snapshot, boolean deleteSnapshot(client(), repository, snapshot, ignoreMissing); } - protected static void deleteSnapshot(RestClient client, String repository, String snapshot, boolean ignoreMissing) throws IOException { + protected static void deleteSnapshot(RestClient restClient, String repository, String snapshot, boolean ignoreMissing) + throws IOException { final Request request = new Request(HttpDelete.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot); if (ignoreMissing) { request.addParameter("ignore", "404"); } - final Response response = client.performRequest(request); + final Response response = restClient.performRequest(request); assertThat(response.getStatusLine().getStatusCode(), ignoreMissing ? anyOf(equalTo(200), equalTo(404)) : equalTo(200)); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java index 66bad2fe602fd..32902b49b515b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java @@ -111,17 +111,17 @@ public HttpRequest removeHeader(String header) { } @Override - public HttpResponse createResponse(RestStatus status, BytesReference content) { - Map headers = new HashMap<>(); + public HttpResponse createResponse(RestStatus status, BytesReference unused) { + Map responseHeaders = new HashMap<>(); return new HttpResponse() { @Override public void addHeader(String name, String value) { - headers.put(name, value); + responseHeaders.put(name, value); } @Override public boolean containsHeader(String name) { - return headers.containsKey(name); + return responseHeaders.containsKey(name); } }; } @@ -212,8 +212,8 @@ public Builder withParams(Map params) { return this; } - public Builder withContent(BytesReference content, XContentType xContentType) { - this.content = content; + public Builder withContent(BytesReference contentBytes, XContentType xContentType) { + this.content = contentBytes; if (xContentType != null) { headers.put("Content-Type", Collections.singletonList(xContentType.mediaType())); } @@ -230,8 +230,8 @@ public Builder withMethod(Method method) { return this; } - public Builder withRemoteAddress(InetSocketAddress address) { - this.address = address; + public Builder withRemoteAddress(InetSocketAddress remoteAddress) { + this.address = remoteAddress; return this; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java index 0bf67360e565d..06cff22d70394 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java @@ -24,7 +24,7 @@ import java.util.Map; /** - * Holds an object and allows to extract specific values from it given their path + * Holds an object and allows extraction of specific values from it, given their path */ public class ObjectPath { @@ -77,24 +77,24 @@ public T evaluate(String path) throws IOException { @SuppressWarnings("unchecked") public T evaluate(String path, Stash stash) throws IOException { String[] parts = parsePath(path); - Object object = this.object; + Object result = this.object; for (String part : parts) { - object = evaluate(part, object, stash); - if (object == null) { + result = evaluate(part, result, stash); + if (result == null) { return null; } } - return (T) object; + return (T) result; } @SuppressWarnings("unchecked") - private Object evaluate(String key, Object object, Stash stash) throws IOException { + private Object evaluate(String key, Object objectToEvaluate, Stash stash) throws IOException { if (stash.containsStashedValue(key)) { key = stash.getValue(key).toString(); } - if (object instanceof Map) { - final Map objectAsMap = (Map) object; + if (objectToEvaluate instanceof Map) { + final Map objectAsMap = (Map) objectToEvaluate; if ("_arbitrary_key_".equals(key)) { if (objectAsMap.isEmpty()) { throw new IllegalArgumentException("requested [" + key + "] but the map was empty"); @@ -106,10 +106,10 @@ private Object evaluate(String key, Object object, Stash stash) throws IOExcepti } return objectAsMap.get(key); } - if (object instanceof List) { - List list = (List) object; + if (objectToEvaluate instanceof List) { + List list = (List) objectToEvaluate; try { - return list.get(Integer.valueOf(key)); + return list.get(Integer.parseInt(key)); } catch (NumberFormatException e) { throw new IllegalArgumentException("element was a list, but [" + key + "] was not numeric", e); } catch (IndexOutOfBoundsException e) { @@ -120,7 +120,9 @@ private Object evaluate(String key, Object object, Stash stash) throws IOExcepti } } - throw new IllegalArgumentException("no object found for [" + key + "] within object of class [" + object.getClass() + "]"); + throw new IllegalArgumentException( + "no object found for [" + key + "] within object of class [" + objectToEvaluate.getClass() + "]" + ); } private String[] parsePath(String path) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApi.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApi.java index 13c099af6e4ef..b6264e0a6d5e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApi.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/restspec/ClientYamlSuiteRestApi.java @@ -166,11 +166,11 @@ public List getRequestMimeTypes() { * - /{index}/_alias/{name}, /{index}/_aliases/{name} * - /{index}/{type}/_mapping, /{index}/{type}/_mappings, /{index}/_mappings/{type}, /{index}/_mapping/{type} */ - public List getBestMatchingPaths(Set params) { + public List getBestMatchingPaths(Set pathParams) { PriorityQueue> queue = new PriorityQueue<>(Comparator.comparing(Tuple::v1, (a, b) -> Integer.compare(b, a))); for (ClientYamlSuiteRestApi.Path path : paths) { int matches = 0; - for (String actualParameter : params) { + for (String actualParameter : pathParams) { if 
(path.getParts().contains(actualParameter)) { matches++; } @@ -180,17 +180,17 @@ public List getBestMatchingPaths(Set params } } if (queue.isEmpty()) { - throw new IllegalStateException("Unable to find a matching path for api [" + name + "]" + params); + throw new IllegalStateException("Unable to find a matching path for api [" + name + "]" + pathParams); } - List paths = new ArrayList<>(); + List pathsByRelevance = new ArrayList<>(); Tuple poll = queue.poll(); int maxMatches = poll.v1(); do { - paths.add(poll.v2()); + pathsByRelevance.add(poll.v2()); poll = queue.poll(); } while (poll != null && poll.v1() == maxMatches); - return paths; + return pathsByRelevance; } public static class Path { @@ -224,8 +224,8 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - Path path = (Path) o; - return this.path.equals(path.path); + Path other = (Path) o; + return this.path.equals(other.path); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 35f29168d3623..5cabfc3bb4bc4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -252,8 +252,8 @@ public String getCatch() { return catchParam; } - public void setCatch(String catchParam) { - this.catchParam = catchParam; + public void setCatch(String param) { + this.catchParam = param; } public ApiCallSection getApiCallSection() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/FakeTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/FakeTransport.java index 4f750d9d563ee..d17b7aa021078 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/FakeTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/FakeTransport.java @@ -35,11 +35,11 @@ public class FakeTransport extends AbstractLifecycleComponent implements Transpo private TransportMessageListener listener; @Override - public void setMessageListener(TransportMessageListener listener) { + public void setMessageListener(TransportMessageListener messageListener) { if (this.listener != null) { throw new IllegalStateException("listener already set"); } - this.listener = listener; + this.listener = messageListener; } @Override @@ -63,8 +63,8 @@ public List getDefaultSeedAddresses() { } @Override - public void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener listener) { - listener.onResponse(new CloseableConnection() { + public void openConnection(DiscoveryNode node, ConnectionProfile profile, ActionListener actionListener) { + actionListener.onResponse(new CloseableConnection() { @Override public DiscoveryNode getNode() { return node; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index 8d6c078fd12c5..55d2e66bdcc0d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -74,7 +74,9 @@ public TransportService createTransportService( public MockTransport() { super(new FakeTransport()); - setDefaultConnectBehavior((transport, discoveryNode, profile, listener) -> 
listener.onResponse(createConnection(discoveryNode))); + setDefaultConnectBehavior( + (transport, discoveryNode, profile, actionListener) -> actionListener.onResponse(createConnection(discoveryNode)) + ); } /** @@ -172,12 +174,12 @@ public void sendRequest(long requestId, String action, TransportRequest request, protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {} @Override - public void setMessageListener(TransportMessageListener listener) { + public void setMessageListener(TransportMessageListener messageListener) { if (this.listener != null) { throw new IllegalStateException("listener already set"); } - this.listener = listener; - super.setMessageListener(listener); + this.listener = messageListener; + super.setMessageListener(messageListener); } protected NamedWriteableRegistry writeableRegistry() { diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/InternalBoxplot.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/InternalBoxplot.java index 477ea5c9b6ba6..b731e992b8e3b 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/InternalBoxplot.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/InternalBoxplot.java @@ -44,8 +44,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return state == null ? Double.NEGATIVE_INFINITY : state.getMin(); + double value(TDigestState digestState) { + return digestState == null ? Double.NEGATIVE_INFINITY : digestState.getMin(); } }, MAX { @@ -55,8 +55,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return state == null ? Double.POSITIVE_INFINITY : state.getMax(); + double value(TDigestState digestState) { + return digestState == null ? Double.POSITIVE_INFINITY : digestState.getMax(); } }, Q1 { @@ -66,8 +66,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return state == null ? Double.NaN : state.quantile(0.25); + double value(TDigestState digestState) { + return digestState == null ? Double.NaN : digestState.quantile(0.25); } }, Q2 { @@ -77,8 +77,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return state == null ? Double.NaN : state.quantile(0.5); + double value(TDigestState digestState) { + return digestState == null ? Double.NaN : digestState.quantile(0.5); } }, Q3 { @@ -88,8 +88,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return state == null ? Double.NaN : state.quantile(0.75); + double value(TDigestState digestState) { + return digestState == null ? 
Double.NaN : digestState.quantile(0.75); } }, LOWER { @@ -99,8 +99,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return whiskers(state)[0]; + double value(TDigestState digestState) { + return whiskers(digestState)[0]; } }, UPPER { @@ -110,8 +110,8 @@ enum Metrics { } @Override - double value(TDigestState state) { - return whiskers(state)[1]; + double value(TDigestState digestState) { + return whiskers(digestState)[1]; } }; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java index 9a55a991fb771..d43c15582e9c5 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java @@ -350,6 +350,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } @Override + @SuppressWarnings("HiddenField") protected InternalMultiTerms create( String name, List buckets, @@ -415,11 +416,13 @@ protected int getRequiredSize() { } @Override + @SuppressWarnings("HiddenField") protected Bucket createBucket(long docCount, InternalAggregations aggs, long docCountError, Bucket prototype) { return new Bucket(prototype.terms, docCount, aggs, prototype.showDocCountError, docCountError, formats, keyConverters); } @Override + @SuppressWarnings("HiddenField") public InternalMultiTerms create(List buckets) { return new InternalMultiTerms( name, @@ -493,9 +496,9 @@ private boolean[] needsPromotionToDouble(List aggregations) private InternalAggregation promoteToDouble(InternalAggregation aggregation, boolean[] needsPromotion) { InternalMultiTerms multiTerms = (InternalMultiTerms) aggregation; - List buckets = multiTerms.getBuckets(); + List multiTermsBuckets = multiTerms.getBuckets(); List> newKeys = new ArrayList<>(); - for (InternalMultiTerms.Bucket bucket : buckets) { + for (InternalMultiTerms.Bucket bucket : multiTermsBuckets) { newKeys.add(new ArrayList<>(bucket.terms.size())); } @@ -505,20 +508,20 @@ private InternalAggregation promoteToDouble(InternalAggregation aggregation, boo DocValueFormat format = formats.get(i); if (needsPromotion[i]) { newKeyConverters.add(KeyConverter.DOUBLE); - for (int j = 0; j < buckets.size(); j++) { - newKeys.get(j).add(converter.toDouble(format, buckets.get(j).terms.get(i))); + for (int j = 0; j < multiTermsBuckets.size(); j++) { + newKeys.get(j).add(converter.toDouble(format, multiTermsBuckets.get(j).terms.get(i))); } } else { newKeyConverters.add(converter); - for (int j = 0; j < buckets.size(); j++) { - newKeys.get(j).add(buckets.get(j).terms.get(i)); + for (int j = 0; j < multiTermsBuckets.size(); j++) { + newKeys.get(j).add(multiTermsBuckets.get(j).terms.get(i)); } } } - List newBuckets = new ArrayList<>(buckets.size()); - for (int i = 0; i < buckets.size(); i++) { - Bucket oldBucket = buckets.get(i); + List newBuckets = new ArrayList<>(multiTermsBuckets.size()); + for (int i = 0; i < multiTermsBuckets.size(); i++) { + Bucket oldBucket = multiTermsBuckets.get(i); newBuckets.add( new Bucket( newKeys.get(i), diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java index 2642f5c3c6111..fb9d1f1f5f35e 100644 --- 
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationFactory.java @@ -57,15 +57,15 @@ public MultiTermsAggregationFactory( @Override protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { - TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds); + TermsAggregator.BucketCountThresholds thresholds = new TermsAggregator.BucketCountThresholds(this.bucketCountThresholds); if (InternalOrder.isKeyOrder(order) == false - && bucketCountThresholds.getShardSize() == MultiTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) { + && thresholds.getShardSize() == MultiTermsAggregationBuilder.DEFAULT_BUCKET_COUNT_THRESHOLDS.getShardSize()) { // The user has not made a shardSize selection. Use default // heuristic to avoid any wrong-ranking caused by distributed // counting - bucketCountThresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(bucketCountThresholds.getRequiredSize())); + thresholds.setShardSize(BucketUtils.suggestShardSideQueueSize(thresholds.getRequiredSize())); } - bucketCountThresholds.ensureValidity(); + thresholds.ensureValidity(); return new MultiTermsAggregator( name, factories, @@ -76,7 +76,7 @@ protected Aggregator createInternal(Aggregator parent, CardinalityUpperBound car showTermDocCountError, order, collectMode, - bucketCountThresholds, + thresholds, cardinality, metadata ); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index 65f26c391612b..2e445389cb7c5 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -146,11 +146,11 @@ List termValuesList(LeafReaderContext ctx) throws IOException { List> docTerms(List termValuesList, int doc) throws IOException { List> terms = new ArrayList<>(); for (TermValues termValues : termValuesList) { - List values = termValues.collectValues(doc); - if (values == null) { + List collectValues = termValues.collectValues(doc); + if (collectValues == null) { return null; } - terms.add(values); + terms.add(collectValues); } return terms; } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java index 7b7f11846ee00..150dcfeeb8a50 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineMethods.java @@ -94,14 +94,14 @@ static class Softmax implements DoubleUnaryOperator { private double sumExp; Softmax(double[] values) { - double sumExp = 0.0; + double _sumExp = 0.0; for (Double value : values) { if (value.isNaN() == false) { - sumExp += Math.exp(value); + _sumExp += Math.exp(value); } } - this.sumExp = sumExp; + this.sumExp = _sumExp; } @Override @@ -117,6 +117,7 @@ abstract static class 
SinglePassSimpleStatisticsMethod implements DoubleUnaryOpe protected final double mean; protected final int count; + @SuppressWarnings("HiddenField") SinglePassSimpleStatisticsMethod(double[] values) { int count = 0; double sum = 0.0; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/AbstractRateAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/AbstractRateAggregator.java index 5105ee73729ca..263969b59e932 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/AbstractRateAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/AbstractRateAggregator.java @@ -61,20 +61,20 @@ public AbstractRateAggregator( } private SizedBucketAggregator findSizedBucketAncestor() { - SizedBucketAggregator sizedBucketAggregator = null; + SizedBucketAggregator aggregator = null; for (Aggregator ancestor = parent; ancestor != null; ancestor = ancestor.parent()) { if (ancestor instanceof SizedBucketAggregator) { - sizedBucketAggregator = (SizedBucketAggregator) ancestor; + aggregator = (SizedBucketAggregator) ancestor; break; } } - if (sizedBucketAggregator == null) { + if (aggregator == null) { throw new IllegalArgumentException( "The rate aggregation can only be used inside a date histogram aggregation or " + "composite aggregation with one date histogram value source" ); } - return sizedBucketAggregator; + return aggregator; } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalRate.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalRate.java index c221fc612336e..4181e25a2864c 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalRate.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalRate.java @@ -72,15 +72,15 @@ public InternalRate reduce(List aggregations, ReduceContext // Compute the sum of double values with Kahan summation algorithm which is more // accurate than naive summation. 
CompensatedSum kahanSummation = new CompensatedSum(0, 0); - Double divisor = null; + Double firstDivisor = null; for (InternalAggregation aggregation : aggregations) { double value = ((InternalRate) aggregation).sum; kahanSummation.add(value); - if (divisor == null) { - divisor = ((InternalRate) aggregation).divisor; + if (firstDivisor == null) { + firstDivisor = ((InternalRate) aggregation).divisor; } } - return new InternalRate(name, kahanSummation.value(), divisor, format, getMetadata()); + return new InternalRate(name, kahanSummation.value(), firstDivisor, format, getMetadata()); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java index b39cc0fda4e9c..e21a99fecaec4 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java @@ -199,6 +199,7 @@ public Object value(String name) { } @Override + @SuppressWarnings("HiddenField") public InternalStringStats reduce(List aggregations, ReduceContext reduceContext) { long count = 0; long totalLength = 0; diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java index bdf28c33786fe..3d634ecfb2d21 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java @@ -142,7 +142,7 @@ public TopMetricsAggregationBuilder( */ public TopMetricsAggregationBuilder(StreamInput in) throws IOException { super(in); - @SuppressWarnings("unchecked") + @SuppressWarnings({ "unchecked", "HiddenField" }) List> sortBuilders = (List>) (List) in.readNamedWriteableList(SortBuilder.class); this.sortBuilders = sortBuilders; this.size = in.readVInt(); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index 924581f57c829..d7027859a9107 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -132,8 +132,8 @@ Listener getSearchProgressActionListener() { * Update the expiration time of the (partial) response. 
*/ @Override - public void setExpirationTime(long expirationTimeMillis) { - this.expirationTimeMillis = expirationTimeMillis; + public void setExpirationTime(long expirationTime) { + this.expirationTimeMillis = expirationTime; } @Override diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index 482679085650b..fc67c1bc7886d 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -85,6 +85,7 @@ class MutableSearchResponse { * Updates the response with the result of a partial reduction. * @param reducedAggs is a strategy for producing the reduced aggs */ + @SuppressWarnings("HiddenField") synchronized void updatePartialResponse( int successfulShards, TotalHits totalHits, @@ -138,11 +139,11 @@ synchronized void updateWithFailure(ElasticsearchException exc) { /** * Adds a shard failure concurrently (non-blocking). */ - void addQueryFailure(int shardIndex, ShardSearchFailure failure) { + void addQueryFailure(int shardIndex, ShardSearchFailure shardSearchFailure) { synchronized (this) { failIfFrozen(); } - queryFailures.set(shardIndex, failure); + queryFailures.set(shardIndex, shardSearchFailure); } private SearchResponse buildResponse(long taskStartTimeNanos, InternalAggregations reducedAggs) { @@ -290,9 +291,9 @@ private ShardSearchFailure[] buildQueryFailures() { } List failures = new ArrayList<>(); for (int i = 0; i < queryFailures.length(); i++) { - ShardSearchFailure failure = queryFailures.get(i); - if (failure != null) { - failures.add(failure); + ShardSearchFailure shardSearchFailure = queryFailures.get(i); + if (shardSearchFailure != null) { + failures.add(shardSearchFailure); } } return failures.toArray(ShardSearchFailure[]::new); diff --git a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java index 1031228a31f37..416ef4809344a 100644 --- a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java +++ b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java @@ -43,7 +43,7 @@ public AsyncResultsIndexPlugin(Settings settings) { } @Override - public Collection getSystemIndexDescriptors(Settings settings) { + public Collection getSystemIndexDescriptors(Settings unused) { return List.of(AsyncTaskIndexService.getSystemIndexDescriptor()); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java index 3cdb18c7ae24d..7245531b87ac3 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java @@ -88,7 +88,7 @@ public class Autoscaling extends Plugin implements ActionPlugin, ExtensiblePlugi ); private final List autoscalingExtensions; - private final SetOnce clusterService = new SetOnce<>(); + private final SetOnce clusterServiceHolder = new SetOnce<>(); private final SetOnce allocationDeciders = new SetOnce<>(); private final AutoscalingLicenseChecker autoscalingLicenseChecker; @@ 
-115,7 +115,7 @@ public Collection createComponents( IndexNameExpressionResolver indexNameExpressionResolver, Supplier repositoriesServiceSupplier ) { - this.clusterService.set(clusterService); + this.clusterServiceHolder.set(clusterService); return List.of( new AutoscalingCalculateCapacityService.Holder(this), autoscalingLicenseChecker, @@ -209,26 +209,19 @@ public void loadExtensions(ExtensionLoader loader) { @Override public Collection deciders() { assert allocationDeciders.get() != null; + final ClusterService clusterService = clusterServiceHolder.get(); return List.of( new FixedAutoscalingDeciderService(), - new ReactiveStorageDeciderService( - clusterService.get().getSettings(), - clusterService.get().getClusterSettings(), - allocationDeciders.get() - ), - new ProactiveStorageDeciderService( - clusterService.get().getSettings(), - clusterService.get().getClusterSettings(), - allocationDeciders.get() - ), + new ReactiveStorageDeciderService(clusterService.getSettings(), clusterService.getClusterSettings(), allocationDeciders.get()), + new ProactiveStorageDeciderService(clusterService.getSettings(), clusterService.getClusterSettings(), allocationDeciders.get()), new FrozenShardsDeciderService(), new FrozenStorageDeciderService(), new FrozenExistenceDeciderService() ); } - public Set createDeciderServices(AllocationDeciders allocationDeciders) { - this.allocationDeciders.set(allocationDeciders); + public Set createDeciderServices(AllocationDeciders deciders) { + this.allocationDeciders.set(deciders); return autoscalingExtensions.stream().flatMap(p -> p.deciders().stream()).collect(Collectors.toSet()); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java index 2489063244e2b..a5a80d15661ab 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java @@ -74,12 +74,12 @@ public AutoscalingMetadata(final SortedMap po public AutoscalingMetadata(final StreamInput in) throws IOException { final int size = in.readVInt(); - final SortedMap policies = new TreeMap<>(); + final SortedMap policiesMap = new TreeMap<>(); for (int i = 0; i < size; i++) { final AutoscalingPolicyMetadata policyMetadata = new AutoscalingPolicyMetadata(in); - policies.put(policyMetadata.policy().name(), policyMetadata); + policiesMap.put(policyMetadata.policy().name(), policyMetadata); } - this.policies = policies; + this.policies = policiesMap; } @Override diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java index 0a1f92960fa73..64b29e1d83eaa 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java @@ -93,11 +93,11 @@ public Request(final StreamInput in) throws IOException { } if (in.readBoolean()) { int deciderCount = in.readInt(); - SortedMap deciders = new TreeMap<>(); + SortedMap decidersMap = new TreeMap<>(); for (int i = 0; i < deciderCount; ++i) { - deciders.put(in.readString(), Settings.readSettingsFromStream(in)); + 
decidersMap.put(in.readString(), Settings.readSettingsFromStream(in)); } - this.deciders = Collections.unmodifiableSortedMap(deciders); + this.deciders = Collections.unmodifiableSortedMap(decidersMap); } else { this.deciders = null; } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java index 6c0f0b6e0d500..a6fcc13294cb2 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java @@ -34,7 +34,7 @@ public class TransportDeleteAutoscalingPolicyAction extends AcknowledgedTransportMasterNodeAction { - private static final Logger logger = LogManager.getLogger(TransportPutAutoscalingPolicyAction.class); + private static final Logger LOGGER = LogManager.getLogger(TransportPutAutoscalingPolicyAction.class); @Inject public TransportDeleteAutoscalingPolicyAction( @@ -68,7 +68,7 @@ protected void masterOperation( clusterService.submitStateUpdateTask("delete-autoscaling-policy", new AckedClusterStateUpdateTask(request, listener) { @Override public ClusterState execute(final ClusterState currentState) { - return deleteAutoscalingPolicy(currentState, request.name(), logger); + return deleteAutoscalingPolicy(currentState, request.name(), LOGGER); } }); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java index 71c21f99ed847..915fa51f46f7b 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java @@ -39,7 +39,7 @@ public class TransportPutAutoscalingPolicyAction extends AcknowledgedTransportMasterNodeAction { - private static final Logger logger = LogManager.getLogger(TransportPutAutoscalingPolicyAction.class); + private static final Logger LOGGER = LogManager.getLogger(TransportPutAutoscalingPolicyAction.class); private final PolicyValidator policyValidator; private final AutoscalingLicenseChecker autoscalingLicenseChecker; @@ -104,7 +104,7 @@ protected void masterOperation( clusterService.submitStateUpdateTask("put-autoscaling-policy", new AckedClusterStateUpdateTask(request, listener) { @Override public ClusterState execute(final ClusterState currentState) { - return putAutoscalingPolicy(currentState, request, policyValidator, logger); + return putAutoscalingPolicy(currentState, request, policyValidator, LOGGER); } }); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoService.java index 13e52b96180c5..e3f18f2c1c56e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoService.java +++ 
b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoService.java @@ -185,9 +185,9 @@ private void addNodeStats(ImmutableOpenMap.Builder builder, NodeSt } public AutoscalingMemoryInfo snapshot() { - final ImmutableOpenMap nodeToMemory = this.nodeToMemory; + final ImmutableOpenMap nodeToMemoryRef = this.nodeToMemory; return node -> { - Long result = nodeToMemory.get(node.getEphemeralId()); + Long result = nodeToMemoryRef.get(node.getEphemeralId()); // noinspection NumberEquality if (result == FETCHING_SENTINEL) { return null; diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java index f592ff21a9efe..64b9974f483c4 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java @@ -90,11 +90,11 @@ public AutoscalingPolicy(final StreamInput in) throws IOException { this.name = in.readString(); this.roles = in.readSet(StreamInput::readString).stream().collect(Sets.toUnmodifiableSortedSet()); int deciderCount = in.readInt(); - SortedMap deciders = new TreeMap<>(); + SortedMap decidersMap = new TreeMap<>(); for (int i = 0; i < deciderCount; ++i) { - deciders.put(in.readString(), Settings.readSettingsFromStream(in)); + decidersMap.put(in.readString(), Settings.readSettingsFromStream(in)); } - this.deciders = Collections.unmodifiableSortedMap(deciders); + this.deciders = Collections.unmodifiableSortedMap(decidersMap); } @Override diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 4a43819356d3d..03a694511787e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -399,7 +399,7 @@ private IndexMetadata indexMetadata(ShardRouting shard, RoutingAllocation alloca return allocation.metadata().getIndexSafe(shard.index()); } - private Optional highestPreferenceTier(List preferredTiers, DiscoveryNodes nodes) { + private Optional highestPreferenceTier(List preferredTiers, DiscoveryNodes unused) { assert preferredTiers.isEmpty() == false; return Optional.of(preferredTiers.get(0)); } @@ -429,8 +429,8 @@ private long getExpectedShardSize(ShardRouting shard) { } long unmovableSize(String nodeId, Collection shards) { - ClusterInfo info = this.info; - DiskUsage diskUsage = info.getNodeMostAvailableDiskUsages().get(nodeId); + ClusterInfo clusterInfo = this.info; + DiskUsage diskUsage = clusterInfo.getNodeMostAvailableDiskUsages().get(nodeId); if (diskUsage == null) { // do not want to scale up then, since this should only happen when node has just joined (clearly edge case). 
return 0; diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoServiceTests.java index 2e8089c2f1986..67cb99ca3904b 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/memory/AutoscalingMemoryInfoServiceTests.java @@ -395,9 +395,9 @@ public void respond(NodesStatsResponse response, Runnable whileFetching) { }); } - public void respond(BiConsumer> responder) { - assertThat(responder, notNullValue()); - this.responder = responder; + public void respond(BiConsumer> responderValue) { + assertThat(responderValue, notNullValue()); + this.responder = responderValue; } @Override @@ -410,11 +410,11 @@ protected void NodesStatsRequest nodesStatsRequest = (NodesStatsRequest) request; assertThat(nodesStatsRequest.timeout(), equalTo(fetchTimeout)); assertThat(responder, notNullValue()); - BiConsumer> responder = this.responder; + BiConsumer> responderValue = this.responder; this.responder = null; @SuppressWarnings("unchecked") ActionListener statsListener = (ActionListener) listener; - responder.accept(nodesStatsRequest, statsListener); + responderValue.accept(nodesStatsRequest, statsListener); } public void assertNoResponder() { diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderDecisionTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderDecisionTests.java index da619f5392d82..5e4fac860182d 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderDecisionTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderDecisionTests.java @@ -128,13 +128,13 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl @Before public void setup() { - ClusterState state = ClusterState.builder(new ClusterName("test")).build(); - state = addRandomIndices(hotNodes, hotNodes, state); - state = addDataNodes(DATA_HOT_NODE_ROLE, "hot", state, hotNodes); - state = addDataNodes(DATA_WARM_NODE_ROLE, "warm", state, warmNodes); - this.state = state; + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).build(); + clusterState = addRandomIndices(hotNodes, hotNodes, clusterState); + clusterState = addDataNodes(DATA_HOT_NODE_ROLE, "hot", clusterState, hotNodes); + clusterState = addDataNodes(DATA_WARM_NODE_ROLE, "warm", clusterState, warmNodes); + this.state = clusterState; - Set shardIds = shardIds(state.getRoutingNodes().unassigned()); + Set shardIds = shardIds(clusterState.getRoutingNodes().unassigned()); this.subjectShards = new HashSet<>(randomSubsetOf(randomIntBetween(1, shardIds.size()), shardIds)); } @@ -353,8 +353,7 @@ private void verify(VerificationSubject subject, long expected, AllocationDecide } private void verify(VerificationSubject subject, long expected, DiscoveryNodeRole role, AllocationDecider... 
allocationDeciders) { - ClusterState state = this.state; - verify(state, subject, expected, role, allocationDeciders); + verify(this.state, subject, expected, role, allocationDeciders); } private static void verify( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index a234153535f97..d3d85d3f1f28e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -171,6 +171,7 @@ public Ccr(final Settings settings) { } @Override + @SuppressWarnings("HiddenField") public Collection createComponents( final Client client, final ClusterService clusterService, @@ -211,6 +212,7 @@ public Collection createComponents( } @Override + @SuppressWarnings("HiddenField") public List> getPersistentTasksExecutor( ClusterService clusterService, ThreadPool threadPool, @@ -271,7 +273,7 @@ public List> getPersistentTasksExecutor( } public List getRestHandlers( - Settings settings, + Settings unused, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, @@ -367,6 +369,7 @@ public Optional getEngineFactory(final IndexSettings indexSetting } } + @SuppressWarnings("HiddenField") public List> getExecutorBuilders(Settings settings) { if (enabled == false) { return Collections.emptyList(); @@ -417,7 +420,7 @@ public Collection> ind } @Override - public Collection createAllocationDeciders(Settings settings, ClusterSettings clusterSettings) { + public Collection createAllocationDeciders(Settings unused, ClusterSettings clusterSettings) { return List.of(new CcrPrimaryFollowerAllocationDecider()); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index d2c825da6f3d8..f514694e83396 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -146,9 +146,9 @@ protected void doClose() { } public synchronized AutoFollowStats getStats() { - final Map autoFollowers = this.autoFollowers; + final Map autoFollowersCopy = this.autoFollowers; final TreeMap timesSinceLastAutoFollowPerRemoteCluster = new TreeMap<>(); - for (Map.Entry entry : autoFollowers.entrySet()) { + for (Map.Entry entry : autoFollowersCopy.entrySet()) { long lastAutoFollowTimeInMillis = entry.getValue().lastAutoFollowTimeInMillis; long lastSeenMetadataVersion = entry.getValue().metadataVersion; if (lastAutoFollowTimeInMillis != -1) { @@ -227,13 +227,13 @@ void updateAutoFollowers(ClusterState followerClusterState) { return; } - final CopyOnWriteHashMap autoFollowers = CopyOnWriteHashMap.copyOf(this.autoFollowers); + final CopyOnWriteHashMap autoFollowersCopy = CopyOnWriteHashMap.copyOf(this.autoFollowers); Set newRemoteClusters = autoFollowMetadata.getPatterns() .values() .stream() .filter(AutoFollowPattern::isActive) .map(AutoFollowPattern::getRemoteCluster) - .filter(remoteCluster -> autoFollowers.containsKey(remoteCluster) == false) + .filter(remoteCluster -> autoFollowersCopy.containsKey(remoteCluster) == false) .collect(Collectors.toSet()); Map newAutoFollowers = new HashMap<>(newRemoteClusters.size()); @@ -313,7 +313,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } List 
removedRemoteClusters = new ArrayList<>(); - for (Map.Entry entry : autoFollowers.entrySet()) { + for (Map.Entry entry : autoFollowersCopy.entrySet()) { String remoteCluster = entry.getKey(); AutoFollower autoFollower = entry.getValue(); boolean exist = autoFollowMetadata.getPatterns() @@ -334,7 +334,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } } assert assertNoOtherActiveAutoFollower(newAutoFollowers); - this.autoFollowers = autoFollowers.copyAndPutAll(newAutoFollowers).copyAndRemoveAll(removedRemoteClusters); + this.autoFollowers = autoFollowersCopy.copyAndPutAll(newAutoFollowers).copyAndRemoveAll(removedRemoteClusters); } private boolean assertNoOtherActiveAutoFollower(Map newAutoFollowers) { @@ -527,7 +527,7 @@ private void autoFollowIndices( private void checkAutoFollowPattern( String autoFollowPattenName, - String remoteCluster, + String remoteClusterString, AutoFollowPattern autoFollowPattern, List leaderIndicesToFollow, Map headers, @@ -603,7 +603,7 @@ private void checkAutoFollowPattern( } else { followLeaderIndex( autoFollowPattenName, - remoteCluster, + remoteClusterString, indexToFollow, autoFollowPattern, headers, @@ -633,7 +633,7 @@ private static boolean leaderIndexAlreadyFollowed(AutoFollowPattern autoFollowPa private void followLeaderIndex( String autoFollowPattenName, - String remoteCluster, + String remoteClusterString, Index indexToFollow, AutoFollowPattern pattern, Map headers, @@ -643,7 +643,7 @@ private void followLeaderIndex( final String followIndexName = getFollowerIndexName(pattern, leaderIndexName); PutFollowAction.Request request = new PutFollowAction.Request(); - request.setRemoteCluster(remoteCluster); + request.setRemoteCluster(remoteClusterString); request.setLeaderIndex(indexToFollow.getName()); request.setFollowerIndex(followIndexName); request.setSettings(pattern.getSettings()); @@ -852,13 +852,13 @@ static class AutoFollowResult { AutoFollowResult(String autoFollowPatternName, List> results) { this.autoFollowPatternName = autoFollowPatternName; - Map autoFollowExecutionResults = new HashMap<>(); + Map mutableAutoFollowExecutionResults = new HashMap<>(); for (Tuple result : results) { - autoFollowExecutionResults.put(result.v1(), result.v2()); + mutableAutoFollowExecutionResults.put(result.v1(), result.v2()); } this.clusterStateFetchException = null; - this.autoFollowExecutionResults = Collections.unmodifiableMap(autoFollowExecutionResults); + this.autoFollowExecutionResults = Collections.unmodifiableMap(mutableAutoFollowExecutionResults); } AutoFollowResult(String autoFollowPatternName, Exception e) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 541af27e6f6af..2e502e30f53f3 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -136,6 +136,7 @@ protected boolean removeEldestEntry(final Map.Entry operations, - long leaderMaxSeqNoOfUpdatesOrDeletes, + long leaderMaxSequenceNoOfUpdatesOrDeletes, AtomicInteger retryCounter ) { - assert leaderMaxSeqNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "mus is not replicated"; + assert leaderMaxSequenceNoOfUpdatesOrDeletes != SequenceNumbers.UNASSIGNED_SEQ_NO : "mus is not replicated"; final long startTime = relativeTimeProvider.getAsLong(); - 
innerSendBulkShardOperationsRequest(followerHistoryUUID, operations, leaderMaxSeqNoOfUpdatesOrDeletes, response -> { + innerSendBulkShardOperationsRequest(followerHistoryUUID, operations, leaderMaxSequenceNoOfUpdatesOrDeletes, response -> { synchronized (ShardFollowNodeTask.this) { totalWriteTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); successfulWriteRequests++; @@ -459,7 +460,7 @@ private void sendBulkShardOperationsRequest( handleFailure( e, retryCounter, - () -> sendBulkShardOperationsRequest(operations, leaderMaxSeqNoOfUpdatesOrDeletes, retryCounter) + () -> sendBulkShardOperationsRequest(operations, leaderMaxSequenceNoOfUpdatesOrDeletes, retryCounter) ); }); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 07504e9ce41c0..b5736916240b7 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -606,12 +606,12 @@ protected void nodeOperation(final AllocatedPersistentTask task, final ShardFoll } private void fetchFollowerShardInfo( - final Client client, + final Client followerClient, final ShardId shardId, final FollowerStatsInfoHandler handler, final Consumer errorHandler ) { - client.admin().indices().stats(new IndicesStatsRequest().indices(shardId.getIndexName()), ActionListener.wrap(r -> { + followerClient.admin().indices().stats(new IndicesStatsRequest().indices(shardId.getIndexName()), ActionListener.wrap(r -> { IndexStats indexStats = r.getIndex(shardId.getIndexName()); if (indexStats == null) { IndexMetadata indexMetadata = clusterService.state().metadata().index(shardId.getIndex()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 16931cd330a3c..ae5dbdeef564f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -274,7 +274,7 @@ public void onFailure(Exception e) { } private void initiateFollowing( - final Client client, + final Client clientWithHeaders, final PutFollowAction.Request request, final ActionListener listener ) { @@ -283,7 +283,7 @@ private void initiateFollowing( ResumeFollowAction.Request resumeFollowRequest = new ResumeFollowAction.Request(); resumeFollowRequest.setFollowerIndex(request.getFollowerIndex()); resumeFollowRequest.setParameters(new FollowParameters(parameters)); - client.execute( + clientWithHeaders.execute( ResumeFollowAction.INSTANCE, resumeFollowRequest, ActionListener.wrap( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 7b588aa922788..f6bb4b75cda7e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -195,8 +195,8 @@ public void getSnapshotInfo(GetSnapshotInfoContext context) { .setMetadata(true) .setNodes(true) .get(ccrSettings.getRecoveryActionTimeout()); - 
Metadata metadata = response.getState().metadata(); - ImmutableOpenMap indicesMap = metadata.indices(); + Metadata responseMetadata = response.getState().metadata(); + ImmutableOpenMap indicesMap = responseMetadata.indices(); List indices = new ArrayList<>(indicesMap.keySet()); // fork to the snapshot meta pool because the context expects to run on it and asserts that it does @@ -206,7 +206,7 @@ public void getSnapshotInfo(GetSnapshotInfoContext context) { new SnapshotInfo( new Snapshot(this.metadata.name(), SNAPSHOT_ID), indices, - new ArrayList<>(metadata.dataStreams().keySet()), + new ArrayList<>(responseMetadata.dataStreams().keySet()), Collections.emptyList(), response.getState().getNodes().getMaxNodeVersion(), SnapshotState.SUCCESS @@ -248,12 +248,12 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna IndexMetadata.Builder imdBuilder = IndexMetadata.builder(leaderIndex); // Adding the leader index uuid for each shard as custom metadata: - Map metadata = new HashMap<>(); - metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", leaderHistoryUUIDs)); - metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, leaderIndexMetadata.getIndexUUID()); - metadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY, leaderIndexMetadata.getIndex().getName()); - metadata.put(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY, remoteClusterAlias); - imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, metadata); + Map customMetadata = new HashMap<>(); + customMetadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, String.join(",", leaderHistoryUUIDs)); + customMetadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, leaderIndexMetadata.getIndexUUID()); + customMetadata.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY, leaderIndexMetadata.getIndex().getName()); + customMetadata.put(Ccr.CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY, remoteClusterAlias); + imdBuilder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, customMetadata); imdBuilder.settings(leaderIndexMetadata.getSettings()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index b188b1663994e..de1feb86440b7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -504,11 +504,11 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { } @Override - protected synchronized void recoverPrimary(IndexShard primary) { + protected synchronized void recoverPrimary(IndexShard primaryShard) { DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID())); ShardRouting routing = ShardRoutingHelper.newWithRestoreSource( - primary.routingEntry(), + primaryShard.routingEntry(), new RecoverySource.SnapshotRecoverySource( UUIDs.randomBase64UUID(), snapshot, @@ -516,9 +516,9 @@ protected synchronized void recoverPrimary(IndexShard primary) { new IndexId("test", UUIDs.randomBase64UUID(random())) ) ); - primary.markAsRecovering("remote recovery from leader", new RecoveryState(routing, localNode, null)); + primaryShard.markAsRecovering("remote recovery from leader", new RecoveryState(routing, 
localNode, null)); final PlainActionFuture future = PlainActionFuture.newFuture(); - primary.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { + primaryShard.restoreFromRepository(new RestoreOnlyRepository(index.getName()) { @Override public void restoreShard( Store store, @@ -530,11 +530,11 @@ public void restoreShard( ) { ActionListener.completeWith(listener, () -> { IndexShard leader = leaderGroup.getPrimary(); - Lucene.cleanLuceneIndex(primary.store().directory()); + Lucene.cleanLuceneIndex(primaryShard.store().directory()); try (Engine.IndexCommitRef sourceCommit = leader.acquireSafeIndexCommit()) { Store.MetadataSnapshot sourceSnapshot = leader.store().getMetadata(sourceCommit.getIndexCommit()); for (StoreFileMetadata md : sourceSnapshot) { - primary.store() + primaryShard.store() .directory() .copyFrom(leader.store().directory(), md.name(), md.name(), IOContext.DEFAULT); } diff --git a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index 608720d2bea0d..683ef7310a016 100644 --- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -310,16 +310,16 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); String id2 = indexResponse.getId(); - String id; + String idToGet; String dataStreamToSnapshot; String backingIndexName; if (randomBoolean()) { dataStreamToSnapshot = "ds"; - id = this.id; + idToGet = this.id; backingIndexName = this.dsBackingIndexName; } else { dataStreamToSnapshot = "other-ds"; - id = id2; + idToGet = id2; backingIndexName = this.otherDsBackingIndexName; } boolean filterDuringSnapshotting = randomBoolean(); @@ -354,7 +354,7 @@ public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); - assertEquals(DOCUMENT_SOURCE, client.prepareGet(backingIndexName, id).get().getSourceAsMap()); + assertEquals(DOCUMENT_SOURCE, client.prepareGet(backingIndexName, idToGet).get().getSourceAsMap()); SearchHit[] hits = client.prepareSearch(backingIndexName).get().getHits().getHits(); assertEquals(1, hits.length); assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); @@ -845,7 +845,7 @@ public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionExcepti } public void testDeleteDataStreamDuringSnapshot() throws Exception { - Client client = client(); + Client client1 = client(); // this test uses a MockRepository assertAcked(client().admin().cluster().prepareDeleteRepository(REPO)); @@ -866,7 +866,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { - client.prepareIndex(dataStream) + client1.prepareIndex(dataStream) .setOpType(DocWriteRequest.OpType.CREATE) .setId(Integer.toString(i)) .setSource(Collections.singletonMap("@timestamp", "2020-12-12")) @@ -877,7 +877,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception { assertDocCount(dataStream, 100L); logger.info("--> snapshot"); - ActionFuture future = client.admin() + ActionFuture future = client1.admin() .cluster() 
            .prepareCreateSnapshot(repositoryName, SNAPSHOT)
            .setIndices(dataStream)
@@ -890,7 +890,7 @@ public void testDeleteDataStreamDuringSnapshot() throws Exception {
         // non-partial snapshots do not allow delete operations on data streams where snapshot has not been completed
         try {
             logger.info("--> delete index while non-partial snapshot is running");
-            client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { dataStream })).actionGet();
+            client1.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { dataStream })).actionGet();
             fail("Expected deleting index to fail during snapshot");
         } catch (SnapshotInProgressException e) {
             assertThat(e.getMessage(), containsString("Cannot delete data streams that are being snapshotted: [" + dataStream));
diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java
index 5e7429efeaec1..edd9a85862b01 100644
--- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java
+++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingAppender.java
@@ -71,10 +71,10 @@ public void append(LogEvent event) {
     /**
      * Sets whether this appender is enabled or disabled. When disabled, the appender will
      * not perform indexing operations.
-     * @param isEnabled the enabled status of the appender.
+     * @param enabled the enabled status of the appender.
      */
-    public void setEnabled(boolean isEnabled) {
-        this.isEnabled = isEnabled;
+    public void setEnabled(boolean enabled) {
+        this.isEnabled = enabled;
     }

     /**

From 1c623d0f46fc21cc5584343dcb810cdf2f8df5e0 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Tue, 23 Nov 2021 12:03:45 +0200
Subject: [PATCH 15/88] [ML] No need to use parent task client when internal
 infer delegates (#80905)

In #80731 the infer trained model task was correctly set to have the
internal infer action task as its parent when called from there.
However, this was done both by calling `Request.setParentTaskId` and by
using a `ParentTaskAssigningClient`. There is no need to use a parent
task client. Instead, to set the parent task on the request we should
use `setParentTask` rather than `setParentTaskId`, which effectively
sets the target task for a `BaseTasksRequest`. The confusion of
`BaseTasksRequest` holding two fields, both named `parentTaskId`, and
having two methods that both set a parent task id will be addressed in
a separate PR.

Co-authored-by: Elastic Machine
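[Illustrative fragment, not part of the patch: the two similarly named setters
this message contrasts, assuming an `InferTrainedModelDeploymentAction.Request`
named `request` and a parent `org.elasticsearch.tasks.TaskId` named `taskId`,
as in the diff below.]

    // Registers taskId as the parent of the task this request spawns; this is
    // what the commit switches to:
    request.setParentTask(taskId);
    // By contrast, on a BaseTasksRequest this would select the *target* task
    // the request operates on, which is not what is wanted here:
    // request.setParentTaskId(taskId);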
---
 .../xpack/ml/action/TransportInternalInferModelAction.java | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
index 18e66f785fdf4..83afae8777884 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.client.Client;
-import org.elasticsearch.client.ParentTaskAssigningClient;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.core.TimeValue;
@@ -190,9 +189,9 @@ private void inferSingleDocAgainstAllocatedModel(
             Collections.singletonList(doc),
             TimeValue.MAX_VALUE
         );
-        request.setParentTaskId(taskId);
+        request.setParentTask(taskId);
         executeAsyncWithOrigin(
-            new ParentTaskAssigningClient(client, taskId),
+            client,
             ML_ORIGIN,
             InferTrainedModelDeploymentAction.INSTANCE,
             request,

From b9ae8fdb13a6d6ba35a74553649df6d07872b043 Mon Sep 17 00:00:00 2001
From: Adam Locke
Date: Tue, 23 Nov 2021 07:42:56 -0500
Subject: [PATCH 16/88] [DOCS] Fix elasticsearch-reset-password typo (#80919)

---
 docs/reference/setup/install/docker.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc
index 00de1e5f16dc2..89f0f56f9a72a 100644
--- a/docs/reference/setup/install/docker.asciidoc
+++ b/docs/reference/setup/install/docker.asciidoc
@@ -103,7 +103,7 @@ For example:

 [source,sh]
 ----
-docker exec -it es-node01 /usr/share/elasticsearch/bin/reset-elastic-password
+docker exec -it es-node01 /usr/share/elasticsearch/bin/elasticsearch-reset-password
 ----
 ====

From 98279cc4dcee5fd47bede4facd93d317d627eefa Mon Sep 17 00:00:00 2001
From: Rory Hunter
Date: Tue, 23 Nov 2021 14:07:31 +0000
Subject: [PATCH 17/88] Rework breaking changes for new structure (#80907)

The structure of the breaking changes / migration guide was updated in
#79162 to change the categories and split the breaking changes into
different files. This PR amends the changelog generator code in line
with this rework.
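[Illustrative sketch, not part of the patch: the area-to-filename mapping the
reworked generator uses can be exercised in isolation. The class name below is
hypothetical; the derivation expression is copied from the diff that follows.]

    import java.util.List;
    import java.util.Locale;

    public class AreaFilenameDemo {
        public static void main(String[] args) {
            for (String area : List.of("Cluster and node setting", "REST API", "JVM option")) {
                // Same expression as in BreakingChangesGenerator.update below.
                String filename = area.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-")
                    + "-changes.asciidoc";
                // Prints e.g. "Cluster and node setting -> cluster-node-setting-changes.asciidoc"
                System.out.println(area + " -> " + filename);
            }
        }
    }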
--- .../release/BreakingChangesGenerator.java | 122 +++++++++++++++--- .../internal/release/ChangelogEntry.java | 21 ++- .../release/GenerateReleaseNotesTask.java | 41 +++++- .../internal/release/ReleaseToolsPlugin.java | 6 +- .../src/main/resources/changelog-schema.json | 6 + .../templates/breaking-changes-area.asciidoc | 39 ++++++ .../templates/breaking-changes.asciidoc | 39 +----- .../release/BreakingChangesGeneratorTest.java | 52 +++++++- .../release/GenerateReleaseNotesTaskTest.java | 2 - ...gesGeneratorTest.generateAreaFile.asciidoc | 33 +++++ ...sGeneratorTest.generateIndexFile.asciidoc} | 53 +------- 11 files changed, 293 insertions(+), 121 deletions(-) create mode 100644 build-tools-internal/src/main/resources/templates/breaking-changes-area.asciidoc create mode 100644 build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateAreaFile.asciidoc rename build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/{BreakingChangesGeneratorTest.generateFile.asciidoc => BreakingChangesGeneratorTest.generateIndexFile.asciidoc} (54%) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java index fc33c288cf944..286f23d83e5bb 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/BreakingChangesGenerator.java @@ -11,6 +11,7 @@ import com.google.common.annotations.VisibleForTesting; import org.elasticsearch.gradle.VersionProperties; +import org.gradle.api.GradleException; import java.io.File; import java.io.FileWriter; @@ -18,49 +19,110 @@ import java.nio.file.Files; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.TreeMap; +import java.util.TreeSet; import java.util.stream.Collectors; import static java.util.Comparator.comparing; import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.toCollection; /** - * Generates the page that lists the breaking changes and deprecations for a minor version release. + * Generates the page that contains an index into the breaking changes and lists deprecations for a minor version release, + * and the individual pages for each breaking area. 
*/ public class BreakingChangesGenerator { - static void update(File templateFile, File outputFile, List entries) throws IOException { - try (FileWriter output = new FileWriter(outputFile)) { + // Needs to match `changelog-schema.json` + private static final List BREAKING_AREAS = List.of( + "Cluster and node setting", + "Command line tool", + "Index setting", + "JVM option", + "Java API", + "Logging", + "Mapping", + "Packaging", + "Painless", + "REST API", + "System requirement", + "Transform" + ); + + static void update( + File indexTemplateFile, + File indexOutputFile, + File outputDirectory, + File areaTemplateFile, + List entries + ) throws IOException { + if (outputDirectory.exists()) { + if (outputDirectory.isDirectory() == false) { + throw new GradleException("Path [" + outputDirectory + "] exists but isn't a directory!"); + } + } else { + Files.createDirectory(outputDirectory.toPath()); + } + + try (FileWriter output = new FileWriter(indexOutputFile)) { output.write( - generateFile(QualifiedVersion.of(VersionProperties.getElasticsearch()), Files.readString(templateFile.toPath()), entries) + generateIndexFile( + QualifiedVersion.of(VersionProperties.getElasticsearch()), + Files.readString(indexTemplateFile.toPath()), + entries + ) ); } - } - @VisibleForTesting - static String generateFile(QualifiedVersion version, String template, List entries) throws IOException { + String areaTemplate = Files.readString(areaTemplateFile.toPath()); - final Map>> breakingChangesByNotabilityByArea = entries.stream() - .map(ChangelogEntry::getBreaking) - .filter(Objects::nonNull) - .sorted(comparing(ChangelogEntry.Breaking::getTitle)) - .collect( - groupingBy( - ChangelogEntry.Breaking::isNotable, - groupingBy(ChangelogEntry.Breaking::getArea, TreeMap::new, Collectors.toList()) - ) - ); + for (String breakingArea : BREAKING_AREAS) { + final List entriesForArea = entries.stream() + .map(ChangelogEntry::getBreaking) + .filter(entry -> entry != null && breakingArea.equals(entry.getArea())) + .collect(Collectors.toList()); + + if (entriesForArea.isEmpty()) { + continue; + } + + final String outputFilename = breakingArea.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-") + + "-changes.asciidoc"; + + try (FileWriter output = new FileWriter(outputDirectory.toPath().resolve(outputFilename).toFile())) { + output.write( + generateBreakingAreaFile( + QualifiedVersion.of(VersionProperties.getElasticsearch()), + areaTemplate, + breakingArea, + entriesForArea + ) + ); + } + } + } + @VisibleForTesting + static String generateIndexFile(QualifiedVersion version, String template, List entries) throws IOException { final Map> deprecationsByArea = entries.stream() .map(ChangelogEntry::getDeprecation) .filter(Objects::nonNull) .sorted(comparing(ChangelogEntry.Deprecation::getTitle)) .collect(groupingBy(ChangelogEntry.Deprecation::getArea, TreeMap::new, Collectors.toList())); + final List breakingIncludeList = entries.stream() + .filter(each -> each.getBreaking() != null) + .map(each -> each.getBreaking().getArea().toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "-")) + .distinct() + .sorted() + .toList(); + final Map bindings = new HashMap<>(); - bindings.put("breakingChangesByNotabilityByArea", breakingChangesByNotabilityByArea); + bindings.put("breakingIncludeList", breakingIncludeList); bindings.put("deprecationsByArea", deprecationsByArea); bindings.put("isElasticsearchSnapshot", version.isSnapshot()); bindings.put("majorDotMinor", version.getMajor() + "." 
+ version.getMinor()); @@ -70,4 +132,28 @@ static String generateFile(QualifiedVersion version, String template, List entriesForArea + ) throws IOException { + final Map> breakingEntriesByNotability = entriesForArea.stream() + .collect( + groupingBy( + ChangelogEntry.Breaking::isNotable, + toCollection(() -> new TreeSet<>(comparing(ChangelogEntry.Breaking::getTitle))) + ) + ); + + final Map bindings = new HashMap<>(); + bindings.put("breakingArea", breakingArea); + bindings.put("breakingEntriesByNotability", breakingEntriesByNotability); + bindings.put("breakingAreaAnchor", breakingArea.toLowerCase(Locale.ROOT).replaceFirst(" and", "").replaceAll(" ", "_")); + bindings.put("majorMinor", String.valueOf(version.getMajor()) + version.getMinor()); + + return TemplateUtils.render(template, bindings); + } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java index 19b9ed2f274a4..94c77768b14b0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ChangelogEntry.java @@ -215,6 +215,7 @@ public static class Breaking { private String details; private String impact; private boolean notable; + private boolean essSettingChange; public String getArea() { return area; @@ -260,6 +261,14 @@ public String getAnchor() { return generatedAnchor(this.title); } + public boolean isEssSettingChange() { + return essSettingChange; + } + + public void setEssSettingChange(boolean essSettingChange) { + this.essSettingChange = essSettingChange; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -273,23 +282,25 @@ public boolean equals(Object o) { && Objects.equals(area, breaking.area) && Objects.equals(title, breaking.title) && Objects.equals(details, breaking.details) - && Objects.equals(impact, breaking.impact); + && Objects.equals(impact, breaking.impact) + && Objects.equals(essSettingChange, breaking.essSettingChange); } @Override public int hashCode() { - return Objects.hash(area, title, details, impact, notable); + return Objects.hash(area, title, details, impact, notable, essSettingChange); } @Override public String toString() { return String.format( - "Breaking{area='%s', title='%s', details='%s', impact='%s', isNotable=%s}", + "Breaking{area='%s', title='%s', details='%s', impact='%s', notable=%s, essSettingChange=%s}", area, title, details, impact, - notable + notable, + essSettingChange ); } } @@ -351,7 +362,7 @@ public String toString() { } private static String generatedAnchor(String input) { - final List excludes = List.of("the", "is", "a"); + final List excludes = List.of("the", "is", "a", "and"); final String[] words = input.toLowerCase(Locale.ROOT) .replaceAll("[^\\w]+", "_") diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java index 70fafc303bcd3..7f09dbb87d3f0 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTask.java @@ -14,6 +14,8 @@ import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import 
org.gradle.api.file.ConfigurableFileCollection; +import org.gradle.api.file.Directory; +import org.gradle.api.file.DirectoryProperty; import org.gradle.api.file.FileCollection; import org.gradle.api.file.RegularFile; import org.gradle.api.file.RegularFileProperty; @@ -22,6 +24,7 @@ import org.gradle.api.model.ObjectFactory; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.TaskAction; import org.gradle.process.ExecOperations; @@ -55,11 +58,13 @@ public class GenerateReleaseNotesTask extends DefaultTask { private final RegularFileProperty releaseNotesTemplate; private final RegularFileProperty releaseHighlightsTemplate; private final RegularFileProperty breakingChangesTemplate; + private final RegularFileProperty breakingChangesAreaTemplate; private final RegularFileProperty releaseNotesIndexFile; private final RegularFileProperty releaseNotesFile; private final RegularFileProperty releaseHighlightsFile; - private final RegularFileProperty breakingChangesFile; + private final RegularFileProperty breakingChangesIndexFile; + private final DirectoryProperty breakingChangesDirectory; private final GitWrapper gitWrapper; @@ -71,11 +76,13 @@ public GenerateReleaseNotesTask(ObjectFactory objectFactory, ExecOperations exec releaseNotesTemplate = objectFactory.fileProperty(); releaseHighlightsTemplate = objectFactory.fileProperty(); breakingChangesTemplate = objectFactory.fileProperty(); + breakingChangesAreaTemplate = objectFactory.fileProperty(); releaseNotesIndexFile = objectFactory.fileProperty(); releaseNotesFile = objectFactory.fileProperty(); releaseHighlightsFile = objectFactory.fileProperty(); - breakingChangesFile = objectFactory.fileProperty(); + breakingChangesIndexFile = objectFactory.fileProperty(); + breakingChangesDirectory = objectFactory.directoryProperty(); gitWrapper = new GitWrapper(execOperations); } @@ -129,7 +136,9 @@ public void executeTask() throws IOException { LOGGER.info("Generating breaking changes / deprecations notes..."); BreakingChangesGenerator.update( this.breakingChangesTemplate.get().getAsFile(), - this.breakingChangesFile.get().getAsFile(), + this.breakingChangesIndexFile.get().getAsFile(), + this.breakingChangesDirectory.get().getAsFile(), + this.breakingChangesAreaTemplate.get().getAsFile(), entries ); } @@ -339,11 +348,29 @@ public void setReleaseHighlightsFile(RegularFile file) { } @OutputFile - public RegularFileProperty getBreakingChangesFile() { - return breakingChangesFile; + public RegularFileProperty getBreakingChangesIndexFile() { + return breakingChangesIndexFile; } - public void setBreakingChangesFile(RegularFile file) { - this.breakingChangesFile.set(file); + public void setBreakingChangesIndexFile(RegularFile file) { + this.breakingChangesIndexFile.set(file); + } + + public void setBreakingChangesDirectory(Directory breakingChangesDirectory) { + this.breakingChangesDirectory.set(breakingChangesDirectory); + } + + @OutputDirectory + public DirectoryProperty getBreakingChangesDirectory() { + return breakingChangesDirectory; + } + + @InputFile + public RegularFileProperty getBreakingChangesAreaTemplate() { + return breakingChangesAreaTemplate; + } + + public void setBreakingChangesAreaTemplate(RegularFile file) { + this.breakingChangesAreaTemplate.set(file); } } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java 
b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index 8f08da371ec4b..97b0b46365bda 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -84,11 +84,15 @@ public void apply(Project project) { task.setReleaseHighlightsFile(projectDirectory.file("docs/reference/release-notes/highlights.asciidoc")); task.setBreakingChangesTemplate(projectDirectory.file(RESOURCES + "templates/breaking-changes.asciidoc")); - task.setBreakingChangesFile( + task.setBreakingChangesIndexFile( projectDirectory.file( String.format("docs/reference/migration/migrate_%d_%d.asciidoc", version.getMajor(), version.getMinor()) ) ); + task.setBreakingChangesAreaTemplate(projectDirectory.file(RESOURCES + "templates/breaking-changes-area.asciidoc")); + task.setBreakingChangesDirectory( + projectDirectory.dir(String.format("docs/reference/migration/migrate_%d_%d", version.getMajor(), version.getMinor())) + ); task.dependsOn(validateChangelogsTask); }); diff --git a/build-tools-internal/src/main/resources/changelog-schema.json b/build-tools-internal/src/main/resources/changelog-schema.json index 7eb80babe3c15..e96e014fa19e3 100644 --- a/build-tools-internal/src/main/resources/changelog-schema.json +++ b/build-tools-internal/src/main/resources/changelog-schema.json @@ -157,6 +157,9 @@ }, "notable": { "type": "boolean" + }, + "ess_setting_change": { + "type": "boolean" } }, "required": [ @@ -179,6 +182,9 @@ "body": { "type": "string", "minLength": 1 + }, + "ess_setting_change": { + "type": "boolean" } }, "required": [ diff --git a/build-tools-internal/src/main/resources/templates/breaking-changes-area.asciidoc b/build-tools-internal/src/main/resources/templates/breaking-changes-area.asciidoc new file mode 100644 index 0000000000000..43d6d376bbbbe --- /dev/null +++ b/build-tools-internal/src/main/resources/templates/breaking-changes-area.asciidoc @@ -0,0 +1,39 @@ +[discrete] +[[breaking_${majorMinor}_${breakingAreaAnchor}]] +==== ${breakingArea} + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +TIP: {ess-setting-change} + +<% +[true, false].each { isNotable -> + def breakingChanges = breakingEntriesByNotability.getOrDefault(isNotable, []) + + if (breakingChanges.isEmpty() == false) { + if (isNotable) { + /* No newline here, one will be added below */ + print "// tag::notable-breaking-changes[]" + } + + for (breaking in breakingChanges) { %> +[[${ breaking.anchor }]] +. ${breaking.title}${ breaking.essSettingChange ? ' {ess-icon}' : '' } +[%collapsible] +==== +*Details* + +${breaking.details.trim()} + +*Impact* + +${breaking.impact.trim()} +==== +<% + } + + if (isNotable) { + print "// end::notable-breaking-changes[]\n" + } + } +} +%> diff --git a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc index 38573747863e9..dc240761a5714 100644 --- a/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc +++ b/build-tools-internal/src/main/resources/templates/breaking-changes.asciidoc @@ -9,11 +9,11 @@ your application to {es} ${majorDotMinor}. See also <> and <>. 
<% if (isElasticsearchSnapshot) { %> -coming[${version}] +coming::[${version}] <% } %> //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide -<% if (breakingChangesByNotabilityByArea.isEmpty() == false) { %> +<% if (breakingIncludeList.isEmpty() == false) { %> [discrete] [[breaking-changes-${majorDotMinor}]] === Breaking changes @@ -29,41 +29,14 @@ Significant changes in behavior are deprecated in a minor release and the old behavior is supported until the next major release. To find out if you are using any deprecated functionality, enable <>. -<% -[true, false].each { isNotable -> - def breakingChangesByArea = breakingChangesByNotabilityByArea.getOrDefault(isNotable, []) - - breakingChangesByArea.eachWithIndex { area, breakingChanges, i -> - print "\n" - - if (isNotable) { - print "// tag::notable-breaking-changes[]\n" - } - print "[discrete]\n" - print "[[breaking_${majorMinor}_${ area.toLowerCase().replaceAll("[^a-z0-9]+", "_") }]]\n" - print "==== ${area}\n" - - for (breaking in breakingChanges) { %> -[[${ breaking.anchor }]] -.${breaking.title} -[%collapsible] -==== -*Details* + -${breaking.details.trim()} - -*Impact* + -${breaking.impact.trim()} -==== <% - } + for (include in breakingIncludeList) { + print "include::migrate_${version.major}_${version.minor}/${include}.asciidoc[]\n"; + } - if (isNotable) { - print "// end::notable-breaking-changes[]\n" - } - } -} } + if (deprecationsByArea.empty == false) { %> [discrete] diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java index 601d3b8ed4870..7d05fbb82a328 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.java @@ -16,8 +16,8 @@ import java.util.List; import java.util.Objects; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.equalTo; -import static org.junit.Assert.assertThat; public class BreakingChangesGeneratorTest { @@ -25,17 +25,46 @@ public class BreakingChangesGeneratorTest { * Check that the breaking changes can be correctly generated. */ @Test - public void generateFile_rendersCorrectMarkup() throws Exception { + public void generateIndexFile_rendersCorrectMarkup() throws Exception { // given: final String template = getResource("/templates/breaking-changes.asciidoc"); final String expectedOutput = getResource( - "/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc" + "/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateIndexFile.asciidoc" ); final List entries = getEntries(); // when: - final String actualOutput = BreakingChangesGenerator.generateFile(QualifiedVersion.of("8.4.0-SNAPSHOT"), template, entries); + final String actualOutput = BreakingChangesGenerator.generateIndexFile(QualifiedVersion.of("8.4.0-SNAPSHOT"), template, entries); + + // then: + assertThat(actualOutput, equalTo(expectedOutput)); + } + + /** + * Check that the breaking changes for a specific area can be correctly generated. 
+ */ + @Test + public void generateAreaFile_rendersCorrectMarkup() throws Exception { + // given: + final String template = getResource("/templates/breaking-changes-area.asciidoc"); + final String expectedOutput = getResource( + "/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateAreaFile.asciidoc" + ); + final String breakingArea = "Cluster and node setting"; + + final List entries = getEntries().stream() + .map(ChangelogEntry::getBreaking) + .filter(each -> each.getArea().equals(breakingArea)) + .toList(); + + // when: + final String actualOutput = BreakingChangesGenerator.generateBreakingAreaFile( + QualifiedVersion.of("8.4.0-SNAPSHOT"), + template, + breakingArea, + entries + ); // then: assertThat(actualOutput, equalTo(expectedOutput)); @@ -58,7 +87,7 @@ private List getEntries() { breaking2.setNotable(true); breaking2.setTitle("Breaking change number 2"); - breaking2.setArea("Cluster"); + breaking2.setArea("Cluster and node setting"); breaking2.setDetails("Breaking change details 2"); breaking2.setImpact("Breaking change impact description 2"); @@ -72,7 +101,18 @@ private List getEntries() { breaking3.setDetails("Breaking change details 3"); breaking3.setImpact("Breaking change impact description 3"); - return List.of(entry1, entry2, entry3); + ChangelogEntry entry4 = new ChangelogEntry(); + ChangelogEntry.Breaking breaking4 = new ChangelogEntry.Breaking(); + entry4.setBreaking(breaking4); + + breaking4.setNotable(true); + breaking4.setTitle("Breaking change number 4"); + breaking4.setArea("Cluster and node setting"); + breaking4.setDetails("Breaking change details 4"); + breaking4.setImpact("Breaking change impact description 4"); + breaking4.setEssSettingChange(true); + + return List.of(entry1, entry2, entry3, entry4); } private String getResource(String name) throws Exception { diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java index 8f35997c1e7d5..d2deffdbf332f 100644 --- a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/GenerateReleaseNotesTaskTest.java @@ -10,7 +10,6 @@ import org.elasticsearch.gradle.internal.test.GradleUnitTestCase; import org.junit.Before; -import org.junit.Ignore; import org.junit.Test; import java.io.File; @@ -34,7 +33,6 @@ import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; -@Ignore("https://github.com/elastic/elasticsearch/issues/77190") public class GenerateReleaseNotesTaskTest extends GradleUnitTestCase { private GitWrapper gitWrapper; diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateAreaFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateAreaFile.asciidoc new file mode 100644 index 0000000000000..dcd4d646d5a6a --- /dev/null +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateAreaFile.asciidoc @@ -0,0 +1,33 @@ +[discrete] +[[breaking_84_cluster_node_setting]] +==== Cluster and node setting + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +TIP: 
{ess-setting-change} + +// tag::notable-breaking-changes[] +[[breaking_change_number_2]] +. Breaking change number 2 +[%collapsible] +==== +*Details* + +Breaking change details 2 + +*Impact* + +Breaking change impact description 2 +==== + +[[breaking_change_number_4]] +. Breaking change number 4 {ess-icon} +[%collapsible] +==== +*Details* + +Breaking change details 4 + +*Impact* + +Breaking change impact description 4 +==== +// end::notable-breaking-changes[] + diff --git a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateIndexFile.asciidoc similarity index 54% rename from build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc rename to build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateIndexFile.asciidoc index 4a61c2de4016f..277833e0171be 100644 --- a/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateFile.asciidoc +++ b/build-tools-internal/src/test/resources/org/elasticsearch/gradle/internal/release/BreakingChangesGeneratorTest.generateIndexFile.asciidoc @@ -9,7 +9,7 @@ your application to {es} 8.4. See also <> and <>. -coming[8.4.0-SNAPSHOT] +coming::[8.4.0-SNAPSHOT] //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -30,52 +30,7 @@ the old behavior is supported until the next major release. To find out if you are using any deprecated functionality, enable <>. -// tag::notable-breaking-changes[] -[discrete] -[[breaking_84_api]] -==== API - -[[breaking_change_number_1]] -.Breaking change number 1 -[%collapsible] -==== -*Details* + -Breaking change details 1 - -*Impact* + -Breaking change impact description 1 -==== -// end::notable-breaking-changes[] - -// tag::notable-breaking-changes[] -[discrete] -[[breaking_84_cluster]] -==== Cluster - -[[breaking_change_number_2]] -.Breaking change number 2 -[%collapsible] -==== -*Details* + -Breaking change details 2 - -*Impact* + -Breaking change impact description 2 -==== -// end::notable-breaking-changes[] - -[discrete] -[[breaking_84_transform]] -==== Transform - -[[breaking_change_number_3]] -.Breaking change number 3 -[%collapsible] -==== -*Details* + -Breaking change details 3 - -*Impact* + -Breaking change impact description 3 -==== +include::migrate_8_4/api.asciidoc[] +include::migrate_8_4/cluster-node-setting.asciidoc[] +include::migrate_8_4/transform.asciidoc[] From d2217ebaa36ab47c4319408e7e262408da45feb7 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 23 Nov 2021 15:14:13 +0100 Subject: [PATCH 18/88] Cleanup SLM History Item .equals (#80938) There was some confusing dead code here and the field comparisons were done in a needlessly confusing manner also. 
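As a generic reference for the cleanup below (the class and fields are illustrative, not the SLM types), the idiomatic equals() shape does the identity and class checks once and then returns a single chain of field comparisons, with hashCode() built over the same fields:

import java.util.Objects;

// Illustrative value class showing the equals()/hashCode() pattern.
class Sample {
    private final String name;
    private final long timestamp;

    Sample(String name, long timestamp) {
        this.name = name;
        this.timestamp = timestamp;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        Sample that = (Sample) o;
        // one straight-line chain of comparisons, no intermediate result flags
        return timestamp == that.timestamp && Objects.equals(name, that.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, timestamp);
    }
}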
--- .../slm/history/SnapshotHistoryItem.java | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java index 38c0225668538..fd24e697818b5 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java @@ -243,19 +243,14 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - boolean result; - if (this == o) result = true; - if (o == null || getClass() != o.getClass()) result = false; - SnapshotHistoryItem that1 = (SnapshotHistoryItem) o; - result = isSuccess() == that1.isSuccess() - && timestamp == that1.getTimestamp() - && Objects.equals(getPolicyId(), that1.getPolicyId()) - && Objects.equals(getRepository(), that1.getRepository()) - && Objects.equals(getSnapshotName(), that1.getSnapshotName()) - && Objects.equals(getOperation(), that1.getOperation()); - if (result == false) return false; SnapshotHistoryItem that = (SnapshotHistoryItem) o; - return Objects.equals(getSnapshotConfiguration(), that.getSnapshotConfiguration()) + return isSuccess() == that.isSuccess() + && timestamp == that.getTimestamp() + && Objects.equals(getPolicyId(), that.getPolicyId()) + && Objects.equals(getRepository(), that.getRepository()) + && Objects.equals(getSnapshotName(), that.getSnapshotName()) + && Objects.equals(getOperation(), that.getOperation()) + && Objects.equals(getSnapshotConfiguration(), that.getSnapshotConfiguration()) && Objects.equals(getErrorDetails(), that.getErrorDetails()); } From 30ed6b07f7ae160756c01f537baedf76364167e8 Mon Sep 17 00:00:00 2001 From: weizijun Date: Tue, 23 Nov 2021 23:23:12 +0800 Subject: [PATCH 19/88] TSDB: Tests for nanosecond timeprecision timestamp just beyond the limit (#80932) add some tests for the nanosecond case --- .../rest-api-spec/test/tsdb/10_settings.yml | 159 ++++++++++++++++++ .../elasticsearch/index/IndexSettings.java | 2 +- .../DataStreamTimestampFieldMapper.java | 24 ++- 3 files changed, 181 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 4df1579cd4597..a1ac1f363d19b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -230,3 +230,162 @@ set start_time and end_time without timeseries mode: index: time_series: end_time: 1632625782000 + +--- +check start_time and end_time with data_nano: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:51.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + 
docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2010-09-26T03:09:52.123456789Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": "2010-09-26T03:09:52.123456789Z", + "metricset": "pod" + } + + - do: + catch: /time series index @timestamp value \[2031-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": "2031-09-26T03:09:52.123456789Z", + "metricset": "pod" + } + +--- +check start_time boundary with data_nano: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:42.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:42.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:41.123456789Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:41.123456789Z", + "metricset": "pod" + } + +--- +check end_time boundary with data_nano: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [metricset] + time_series: + start_time: 2021-09-26T03:09:42Z + end_time: 2021-09-26T03:09:52Z + mappings: + properties: + "@timestamp": + type: date_nanos + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:51.123456789Z", + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:51.123456789Z" ] } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:52.123456789Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": "2021-09-26T03:09:52.123456789Z", + "metricset": "pod" + } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 23df096597588..8b56a9e3d292b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -492,7 +492,7 @@ public static boolean isTimeSeriesModeEnabled() { */ public static final Setting TIME_SERIES_END_TIME = Setting.dateSetting( "index.time_series.end_time", - DateUtils.MAX_NANOSECOND_INSTANT, + Instant.ofEpochMilli(DateUtils.MAX_MILLIS_BEFORE_9999), new Setting.Validator<>() { @Override public void validate(Instant value) {} diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java 
b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java index 5b7b0e5fe6273..9bbe0f2f41889 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DataStreamTimestampFieldMapper.java @@ -14,12 +14,14 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.io.UncheckedIOException; +import java.time.Instant; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -215,19 +217,35 @@ private void validateTimestamp(IndexableField field, DocumentParserContext conte return; } - long value = field.numericValue().longValue(); + long originValue = field.numericValue().longValue(); + long value = originValue; + + Resolution resolution; if (context.mappingLookup().getMapper(DEFAULT_PATH).typeName().equals(DateFieldMapper.DATE_NANOS_CONTENT_TYPE)) { + resolution = Resolution.NANOSECONDS; value /= NSEC_PER_MSEC; + } else { + resolution = Resolution.MILLISECONDS; } long startTime = context.indexSettings().getTimeSeriesStartTime(); if (value < startTime) { - throw new IllegalArgumentException("time series index @timestamp value [" + value + "] must be larger than " + startTime); + throw new IllegalArgumentException( + "time series index @timestamp value [" + + resolution.toInstant(originValue) + + "] must be larger than " + + Instant.ofEpochMilli(startTime) + ); } long endTime = context.indexSettings().getTimeSeriesEndTime(); if (value >= endTime) { - throw new IllegalArgumentException("time series index @timestamp value [" + value + "] must be smaller than " + endTime); + throw new IllegalArgumentException( + "time series index @timestamp value [" + + resolution.toInstant(originValue) + + "] must be smaller than " + + Instant.ofEpochMilli(endTime) + ); } } From 2ce9cd339ab436c652379ff714e7a32c2601a4b3 Mon Sep 17 00:00:00 2001 From: Howard Date: Tue, 23 Nov 2021 23:25:36 +0800 Subject: [PATCH 20/88] Remove unnecessary shuffle in unassigned shards allocation. (#65172) Shuffling to avoid poisonous shards no longer has any effect, since we always sort unassigned shards before iterating them for allocation. 
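A tiny self-contained illustration of that reasoning (the shard names are made up): any ordering a shuffle produces is discarded by a deterministic sort that runs before iteration, so the shuffle is a no-op for the allocator.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class ShuffleThenSort {
    public static void main(String[] args) {
        List<String> shards = new ArrayList<>(List.of("shard-2", "shard-0", "shard-1"));
        Collections.shuffle(shards); // order is now random ...
        Collections.sort(shards);    // ... and immediately deterministic again
        System.out.println(shards);  // always [shard-0, shard-1, shard-2]
    }
}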
--- .../org/elasticsearch/cluster/routing/RoutingNodes.java | 6 ------ .../cluster/routing/allocation/AllocationService.java | 8 -------- 2 files changed, 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index d61a21d0930aa..f71ff55ec68a4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -18,7 +18,6 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.Randomness; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; @@ -1068,11 +1067,6 @@ public boolean isIgnoredEmpty() { return ignored.isEmpty(); } - public void shuffle() { - nodes.ensureMutable(); - Randomness.shuffle(unassigned); - } - /** * Drains all unassigned shards and returns it. * This method will not drain ignored shards. diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 882cc976f1924..82c2841f40e70 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -117,8 +117,6 @@ public ClusterState applyStartedShards(ClusterState clusterState, List Date: Tue, 23 Nov 2021 15:26:28 +0000 Subject: [PATCH 21/88] Remove obsolete typed legacy index templates (#80937) This removes a few legacy index templates that were superseded by equivalent component templates or updated index templates. 
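For context, the removals below all share the same upgrader shape: a `UnaryOperator` over the template map that drops obsolete entries. A minimal runnable sketch (plain `String` values stand in for `IndexTemplateMetadata`, and the template names shown are the Watcher ones removed in this patch):

import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;

class TemplateUpgraderSketch {

    // Mirrors the getIndexTemplateMetadataUpgrader() bodies below, with a
    // simplified value type so the sketch runs standalone.
    static UnaryOperator<Map<String, String>> upgrader() {
        return templates -> {
            templates.remove(".watches");
            templates.remove(".triggered_watches");
            templates.remove(".watch-history-9");
            return templates;
        };
    }

    public static void main(String[] args) {
        Map<String, String> templates = new HashMap<>(Map.of(".watches", "legacy", "other-template", "kept"));
        System.out.println(upgrader().apply(templates)); // {other-template=kept}
    }
}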
--- .../xpack/logstash/Logstash.java | 12 +++++++ .../xpack/monitoring/Monitoring.java | 15 +++++++++ .../xpack/security/Security.java | 2 ++ .../elasticsearch/xpack/watcher/Watcher.java | 5 +++ .../upgrades/WatcherRestartIT.java | 32 +++++++++++++++++++ 5 files changed, 66 insertions(+) diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java index a0bcf9f866f85..4eb9d058aa2dc 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/Logstash.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -41,7 +42,9 @@ import java.io.UncheckedIOException; import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import java.util.function.UnaryOperator; import static org.elasticsearch.index.engine.EngineConfig.INDEX_CODEC_SETTING; import static org.elasticsearch.index.mapper.MapperService.SINGLE_MAPPING_NAME; @@ -191,4 +194,13 @@ public String getFeatureName() { public String getFeatureDescription() { return "Enables Logstash Central Management pipeline storage"; } + + @Override + public UnaryOperator> getIndexTemplateMetadataUpgrader() { + return templates -> { + // .logstash is a system index now. 
deleting the legacy template + templates.remove("logstash-index-template"); + return templates; + }; + } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index 500fdc47bb6c3..c487e8bca5701 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -72,6 +73,7 @@ import java.util.Map; import java.util.Set; import java.util.function.Supplier; +import java.util.function.UnaryOperator; import static org.elasticsearch.common.settings.Setting.boolSetting; @@ -236,4 +238,17 @@ public void reload(Settings settings) throws Exception { exporters.setExportersSetting(settingsForChangedExporter); } } + + @Override + public UnaryOperator> getIndexTemplateMetadataUpgrader() { + return map -> { + // this template was not migrated to typeless due to the possibility of the old /_monitoring/bulk API being used + // see {@link org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils#OLD_TEMPLATE_VERSION} + // however the bulk API is not typed (the type field is for the docs, a field inside the docs) so it's safe to remove this + // old template and rely on the updated, typeless, .monitoring-alerts-7 template + map.remove(".monitoring-alerts"); + return map; + }; + + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index f125fd186592e..f4858acc9cf46 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1531,6 +1531,8 @@ public UnaryOperator> getIndexTemplateMetadat return templates -> { // .security index is not managed by using templates anymore templates.remove("security_audit_log"); + // .security is a system index now. 
deleting another legacy template that's not used anymore + templates.remove("security-index-template"); return templates; }; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 0b770f857cc8f..dccde9f18a796 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -716,6 +716,11 @@ public void onIndexModule(IndexModule module) { public UnaryOperator> getIndexTemplateMetadataUpgrader() { return map -> { map.keySet().removeIf(name -> name.startsWith("watch_history_")); + // watcher migrated to using system indices so these legacy templates are not needed anymore + map.remove(".watches"); + map.remove(".triggered_watches"); + // post 7.x we moved to typeless watch-history-10 + map.remove(".watch-history-9"); return map; }; } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java index a255f96a10221..54921de6b9320 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java @@ -9,10 +9,14 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import java.nio.charset.StandardCharsets; +import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; public class WatcherRestartIT extends AbstractUpgradeTestCase { @@ -25,6 +29,34 @@ public void testWatcherRestart() throws Exception { ensureWatcherStarted(); } + public void testEnsureWatcherDeletesLegacyTemplates() throws Exception { + client().performRequest(new Request("POST", "/_watcher/_start")); + ensureWatcherStarted(); + + if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { + // legacy index template created in previous releases should not be present anymore + assertBusy(() -> { + Request request = new Request("GET", "/_template/*watch*"); + try { + Response response = client().performRequest(request); + Map responseLevel = entityAsMap(response); + assertNotNull(responseLevel); + + assertThat(responseLevel.containsKey(".watches"), is(false)); + assertThat(responseLevel.containsKey(".triggered_watches"), is(false)); + assertThat(responseLevel.containsKey(".watch-history-9"), is(false)); + } catch (ResponseException e) { + // Not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }, 30, TimeUnit.SECONDS); + } + } + private void ensureWatcherStopped() throws Exception { assertBusy(() -> { Response stats = client().performRequest(new Request("GET", "_watcher/stats")); From 2e8a973d4aa35d97275d32a29b851b7f8d9157aa Mon Sep 17 00:00:00 2001 From: Alan Woodward Date: Tue, 23 Nov 2021 15:52:48 +0000 Subject: [PATCH 22/88] Reinterpret dots in field names as object structure (#79922) DocumentParser parses documents by following their object hierarchy, and using a parallel hierarchy of ObjectMappers to work out how to map leaf fields. 
Field names that contain dots complicate this, meaning that many methods need to reverse-engineer the object hierarchy to check that the current parent object mapper is the correct one; this is particularly complex when objects are being created dynamically. To simplify this logic, this commit introduces a DotExpandingXContentParser, which wraps another XContentParser and re-interprets any field name containing dots as a series of objects. So for example, `"foo.bar.baz":{ ... }` is represented as `"foo":{"bar":{"baz":{...}}}`. DocumentParser uses this to automatically expand all field names containing dots when parsing the source. --- .../xcontent/DelegatingXContentParser.java | 244 +++++++++++++++ .../xcontent/DotExpandingXContentParser.java | 201 +++++++++++++ .../DotExpandingXContentParserTests.java | 84 ++++++ rest-api-spec/build.gradle | 1 + .../test/bulk/11_dynamic_templates.yml | 6 +- .../index/mapper/DocumentParser.java | 284 ++++-------------- .../index/mapper/DocumentParserContext.java | 90 +++++- .../index/mapper/NumberFieldMapper.java | 3 + .../index/mapper/DocumentParserTests.java | 87 ++---- .../index/mapper/NumberFieldMapperTests.java | 3 +- .../flattened/FlattenedFieldMapperTests.java | 22 ++ 11 files changed, 714 insertions(+), 311 deletions(-) create mode 100644 libs/x-content/src/main/java/org/elasticsearch/xcontent/DelegatingXContentParser.java create mode 100644 libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java create mode 100644 libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/DelegatingXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DelegatingXContentParser.java new file mode 100644 index 0000000000000..1a87920947db1 --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DelegatingXContentParser.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.xcontent; + +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.RestApiVersion; + +import java.io.IOException; +import java.nio.CharBuffer; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +public abstract class DelegatingXContentParser implements XContentParser { + + protected abstract XContentParser delegate(); + + @Override + public XContentType contentType() { + return delegate().contentType(); + } + + @Override + public void allowDuplicateKeys(boolean allowDuplicateKeys) { + delegate().allowDuplicateKeys(allowDuplicateKeys); + } + + @Override + public Token nextToken() throws IOException { + return delegate().nextToken(); + } + + @Override + public void skipChildren() throws IOException { + delegate().skipChildren(); + } + + @Override + public Token currentToken() { + return delegate().currentToken(); + } + + @Override + public String currentName() throws IOException { + return delegate().currentName(); + } + + @Override + public Map map() throws IOException { + return delegate().map(); + } + + @Override + public Map mapOrdered() throws IOException { + return delegate().mapOrdered(); + } + + @Override + public Map mapStrings() throws IOException { + return delegate().mapStrings(); + } + + @Override + public Map map(Supplier> mapFactory, CheckedFunction mapValueParser) + throws IOException { + return delegate().map(mapFactory, mapValueParser); + } + + @Override + public List list() throws IOException { + return delegate().list(); + } + + @Override + public List listOrderedMap() throws IOException { + return delegate().listOrderedMap(); + } + + @Override + public String text() throws IOException { + return delegate().text(); + } + + @Override + public String textOrNull() throws IOException { + return delegate().textOrNull(); + } + + @Override + public CharBuffer charBufferOrNull() throws IOException { + return delegate().charBufferOrNull(); + } + + @Override + public CharBuffer charBuffer() throws IOException { + return delegate().charBuffer(); + } + + @Override + public Object objectText() throws IOException { + return delegate().objectText(); + } + + @Override + public Object objectBytes() throws IOException { + return delegate().objectBytes(); + } + + @Override + public boolean hasTextCharacters() { + return delegate().hasTextCharacters(); + } + + @Override + public char[] textCharacters() throws IOException { + return delegate().textCharacters(); + } + + @Override + public int textLength() throws IOException { + return delegate().textLength(); + } + + @Override + public int textOffset() throws IOException { + return delegate().textOffset(); + } + + @Override + public Number numberValue() throws IOException { + return delegate().numberValue(); + } + + @Override + public NumberType numberType() throws IOException { + return delegate().numberType(); + } + + @Override + public short shortValue(boolean coerce) throws IOException { + return delegate().shortValue(coerce); + } + + @Override + public int intValue(boolean coerce) throws IOException { + return delegate().intValue(coerce); + } + + @Override + public long longValue(boolean coerce) throws IOException { + return delegate().longValue(coerce); + } + + @Override + public float floatValue(boolean coerce) throws IOException { + return delegate().floatValue(coerce); + } + + @Override + public double doubleValue(boolean coerce) throws IOException { + return delegate().doubleValue(coerce); + } + + @Override + public short shortValue() throws 
IOException { + return delegate().shortValue(); + } + + @Override + public int intValue() throws IOException { + return delegate().intValue(); + } + + @Override + public long longValue() throws IOException { + return delegate().longValue(); + } + + @Override + public float floatValue() throws IOException { + return delegate().floatValue(); + } + + @Override + public double doubleValue() throws IOException { + return delegate().doubleValue(); + } + + @Override + public boolean isBooleanValue() throws IOException { + return delegate().isBooleanValue(); + } + + @Override + public boolean booleanValue() throws IOException { + return delegate().booleanValue(); + } + + @Override + public byte[] binaryValue() throws IOException { + return delegate().binaryValue(); + } + + @Override + public XContentLocation getTokenLocation() { + return delegate().getTokenLocation(); + } + + @Override + public T namedObject(Class categoryClass, String name, Object context) throws IOException { + return delegate().namedObject(categoryClass, name, context); + } + + @Override + public NamedXContentRegistry getXContentRegistry() { + return delegate().getXContentRegistry(); + } + + @Override + public boolean isClosed() { + return delegate().isClosed(); + } + + @Override + public RestApiVersion getRestApiVersion() { + return delegate().getRestApiVersion(); + } + + @Override + public DeprecationHandler getDeprecationHandler() { + return delegate().getDeprecationHandler(); + } + + @Override + public void close() throws IOException { + delegate().close(); + } +} diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java new file mode 100644 index 0000000000000..704edfd019c9a --- /dev/null +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/DotExpandingXContentParser.java @@ -0,0 +1,201 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.xcontent; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.Deque; + +/** + * An XContentParser that reinterprets field names containing dots as an object structure. 
+ * + * A fieldname named {@code "foo.bar.baz":...} will be parsed instead as {@code 'foo':{'bar':{'baz':...}}} + */ +public class DotExpandingXContentParser extends FilterXContentParser { + + private static class WrappingParser extends DelegatingXContentParser { + + final Deque parsers = new ArrayDeque<>(); + + WrappingParser(XContentParser in) throws IOException { + parsers.push(in); + if (in.currentToken() == Token.FIELD_NAME) { + expandDots(); + } + } + + @Override + public Token nextToken() throws IOException { + Token token; + while ((token = delegate().nextToken()) == null) { + parsers.pop(); + if (parsers.isEmpty()) { + return null; + } + } + if (token != Token.FIELD_NAME) { + return token; + } + expandDots(); + return Token.FIELD_NAME; + } + + private void expandDots() throws IOException { + String field = delegate().currentName(); + String[] subpaths = field.split("\\."); + if (subpaths.length == 0) { + throw new IllegalArgumentException("field name cannot contain only dots: [" + field + "]"); + } + if (subpaths.length == 1) { + return; + } + Token token = delegate().nextToken(); + if (token == Token.START_OBJECT || token == Token.START_ARRAY) { + parsers.push(new DotExpandingXContentParser(new XContentSubParser(delegate()), delegate(), subpaths)); + } else if (token == Token.END_OBJECT || token == Token.END_ARRAY) { + throw new IllegalStateException("Expecting START_OBJECT or START_ARRAY or VALUE but got [" + token + "]"); + } else { + parsers.push(new DotExpandingXContentParser(new SingletonValueXContentParser(delegate()), delegate(), subpaths)); + } + } + + @Override + protected XContentParser delegate() { + return parsers.peek(); + } + } + + /** + * Wraps an XContentParser such that it re-interprets dots in field names as an object structure + * @param in the parser to wrap + * @return the wrapped XContentParser + */ + public static XContentParser expandDots(XContentParser in) throws IOException { + return new WrappingParser(in); + } + + private enum State { + PRE, + DURING, + POST + } + + final String[] subPaths; + final XContentParser subparser; + + int level = 0; + private State state = State.PRE; + + private DotExpandingXContentParser(XContentParser subparser, XContentParser root, String[] subPaths) { + super(root); + this.subPaths = subPaths; + this.subparser = subparser; + } + + @Override + public Token nextToken() throws IOException { + if (state == State.PRE) { + level++; + if (level == subPaths.length * 2 - 1) { + state = State.DURING; + return in.currentToken(); + } + if (level % 2 == 0) { + return Token.FIELD_NAME; + } + return Token.START_OBJECT; + } + if (state == State.DURING) { + Token token = subparser.nextToken(); + if (token != null) { + return token; + } + state = State.POST; + } + assert state == State.POST; + if (level >= 1) { + level -= 2; + } + return level < 0 ? null : Token.END_OBJECT; + } + + @Override + public Token currentToken() { + if (state == State.PRE) { + return level % 2 == 1 ? 
Token.START_OBJECT : Token.FIELD_NAME; + } + if (state == State.POST) { + if (level > 1) { + return Token.END_OBJECT; + } + } + return in.currentToken(); + } + + @Override + public String currentName() throws IOException { + if (state == State.DURING) { + return in.currentName(); + } + if (state == State.POST) { + if (level <= 1) { + return in.currentName(); + } + throw new IllegalStateException("Can't get current name during END_OBJECT"); + } + return subPaths[level / 2]; + } + + @Override + public void skipChildren() throws IOException { + if (state == State.PRE) { + in.skipChildren(); + state = State.POST; + } + if (state == State.DURING) { + subparser.skipChildren(); + } + } + + @Override + public String textOrNull() throws IOException { + if (state == State.PRE) { + throw new IllegalStateException("Can't get text on a " + currentToken() + " at " + getTokenLocation()); + } + return super.textOrNull(); + } + + @Override + public Number numberValue() throws IOException { + if (state == State.PRE) { + throw new IllegalStateException("Can't get numeric value on a " + currentToken() + " at " + getTokenLocation()); + } + return super.numberValue(); + } + + @Override + public boolean booleanValue() throws IOException { + if (state == State.PRE) { + throw new IllegalStateException("Can't get boolean value on a " + currentToken() + " at " + getTokenLocation()); + } + return super.booleanValue(); + } + + private static class SingletonValueXContentParser extends FilterXContentParser { + + protected SingletonValueXContentParser(XContentParser in) { + super(in); + } + + @Override + public Token nextToken() throws IOException { + return null; + } + } +} diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java new file mode 100644 index 0000000000000..bc346cb2d0fab --- /dev/null +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/DotExpandingXContentParserTests.java @@ -0,0 +1,84 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.xcontent; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; + +public class DotExpandingXContentParserTests extends ESTestCase { + + private void assertXContentMatches(String expected, String actual) throws IOException { + XContentParser inputParser = createParser(JsonXContent.jsonXContent, actual); + XContentParser expandedParser = DotExpandingXContentParser.expandDots(inputParser); + + XContentBuilder actualOutput = XContentBuilder.builder(JsonXContent.jsonXContent).copyCurrentStructure(expandedParser); + assertEquals(expected, Strings.toString(actualOutput)); + } + + public void testEmbeddedObject() throws IOException { + + assertXContentMatches( + "{\"test\":{\"with\":{\"dots\":{\"field\":\"value\"}}},\"nodots\":\"value2\"}", + "{\"test.with.dots\":{\"field\":\"value\"},\"nodots\":\"value2\"}" + ); + } + + public void testEmbeddedArray() throws IOException { + + assertXContentMatches( + "{\"test\":{\"with\":{\"dots\":[\"field\",\"value\"]}},\"nodots\":\"value2\"}", + "{\"test.with.dots\":[\"field\",\"value\"],\"nodots\":\"value2\"}" + ); + + } + + public void testEmbeddedValue() throws IOException { + + assertXContentMatches( + "{\"test\":{\"with\":{\"dots\":\"value\"}},\"nodots\":\"value2\"}", + "{\"test.with.dots\":\"value\",\"nodots\":\"value2\"}" + ); + + } + + public void testSkipChildren() throws IOException { + XContentParser parser = DotExpandingXContentParser.expandDots( + createParser(JsonXContent.jsonXContent, "{ \"test.with.dots\" : \"value\", \"nodots\" : \"value2\" }") + ); + + parser.nextToken(); // start object + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("test", parser.currentName()); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("with", parser.currentName()); + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.START_OBJECT, parser.currentToken()); + parser.skipChildren(); + assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals("nodots", parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals("value2", parser.text()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + + public void testNestedExpansions() throws IOException { + assertXContentMatches( + "{\"first\":{\"dot\":{\"second\":{\"dot\":\"value\"},\"third\":\"value\"}},\"nodots\":\"value\"}", + "{\"first.dot\":{\"second.dot\":\"value\",\"third\":\"value\"},\"nodots\":\"value\"}" + ); + } +} diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 0f26777f04ec7..e9ca901a49660 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -49,6 +49,7 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> task.skipTestsByFilePattern("**/indices.upgrade/*.yml", "upgrade api will only get a dummy endpoint returning an exception suggesting to use _reindex") task.skipTestsByFilePattern("**/indices.stats/60_field_usage/*/*.yml", "field usage results will be different between 
lucene versions") + task.skipTest("bulk/11_dynamic_templates/Dynamic templates", "Error message has changed") task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typed index while there is a typeless template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") task.skipTest("indices.create/20_mix_typeless_typeful/Implicitly create a typeless index while there is a typed template", "Type information about the type is removed and not passed down. The logic to check for this is also removed.") task.skipTest("delete/70_mix_typeless_typeful/DELETE with typeless API on an index that has types", "Type information about the type is removed and not passed down. The logic to check for this is also removed."); diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml index 1560b575e4498..ef904b341deb6 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/bulk/11_dynamic_templates.yml @@ -1,8 +1,8 @@ --- "Dynamic templates": - skip: - version: " - 7.12.99" - reason: "Dynamic templates parameter is added to bulk requests in 7.13" + version: " - 8.1.0" + reason: "Error message has changed in 8.1.0" - do: indices.create: @@ -166,6 +166,6 @@ - match: { errors: true } - match: { items.0.index.status: 400 } - match: { items.0.index.error.type: mapper_parsing_exception } - - match: { items.0.index.error.reason: "Field [foo] must be an object; but it's configured as [keyword] in dynamic template [string]"} + - match: { items.0.index.error.reason: "failed to parse field [foo] of type [keyword] in document with id 'id_11'. 
Preview of field's value: '{bar=hello world}'"} - match: { items.1.index.status: 201 } - match: { items.1.index.result: created } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index c4ab9e6095db0..f0d5c4c1dc4ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -19,13 +19,13 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.xcontent.DotExpandingXContentParser; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -85,10 +85,10 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL ) ) { context = new InternalDocumentParserContext(mappingLookup, indexSettings, indexAnalyzers, dateParserContext, source, parser); - validateStart(parser); + validateStart(context.parser()); MetadataFieldMapper[] metadataFieldsMappers = mappingLookup.getMapping().getSortedMetadataMappers(); - internalParseDocument(mappingLookup.getMapping().getRoot(), metadataFieldsMappers, context, parser); - validateEnd(parser); + internalParseDocument(mappingLookup.getMapping().getRoot(), metadataFieldsMappers, context); + validateEnd(context.parser()); } catch (Exception e) { throw wrapInMapperParsingException(source, e); } @@ -109,28 +109,13 @@ public ParsedDocument parseDocument(SourceToParse source, MappingLookup mappingL ); } - private static boolean containsDisabledObjectMapper(ObjectMapper objectMapper, String[] subfields) { - for (int i = 0; i < subfields.length - 1; ++i) { - Mapper mapper = objectMapper.getMapper(subfields[i]); - if (mapper instanceof ObjectMapper == false) { - break; - } - objectMapper = (ObjectMapper) mapper; - if (objectMapper.isEnabled() == false) { - return true; - } - } - return false; - } - private static void internalParseDocument( RootObjectMapper root, MetadataFieldMapper[] metadataFieldsMappers, - DocumentParserContext context, - XContentParser parser + DocumentParserContext context ) throws IOException { - final boolean emptyDoc = isEmptyDoc(root, parser); + final boolean emptyDoc = isEmptyDoc(root, context.parser()); for (MetadataFieldMapper metadataMapper : metadataFieldsMappers) { metadataMapper.preParse(context); @@ -138,7 +123,7 @@ private static void internalParseDocument( if (root.isEnabled() == false) { // entire type is disabled - parser.skipChildren(); + context.parser().skipChildren(); } else if (emptyDoc == false) { parseObjectOrNested(context, root); } @@ -457,39 +442,32 @@ static void parseObjectOrNested(DocumentParserContext context, ObjectMapper mapp } if (token == XContentParser.Token.START_OBJECT) { // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first - token = parser.nextToken(); + parser.nextToken(); } - innerParseObject(context, 
mapper, parser, currentFieldName, token); + innerParseObject(context, mapper); // restore the enable path flag if (mapper.isNested()) { nested(context, (NestedObjectMapper) mapper); } } - private static void innerParseObject( - DocumentParserContext context, - ObjectMapper mapper, - XContentParser parser, - String currentFieldName, - XContentParser.Token token - ) throws IOException { + private static void innerParseObject(DocumentParserContext context, ObjectMapper mapper) throws IOException { + + XContentParser.Token token = context.parser().currentToken(); + String currentFieldName = context.parser().currentName(); assert token == XContentParser.Token.FIELD_NAME || token == XContentParser.Token.END_OBJECT; - String[] paths = null; + while (token != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - paths = splitAndValidatePath(currentFieldName); - if (containsDisabledObjectMapper(mapper, paths)) { - parser.nextToken(); - parser.skipChildren(); - } + currentFieldName = context.parser().currentName(); + splitAndValidatePath(currentFieldName); } else if (token == XContentParser.Token.START_OBJECT) { - parseObject(context, mapper, currentFieldName, paths); + parseObject(context, mapper, currentFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - parseArray(context, mapper, currentFieldName, paths); + parseArray(context, mapper, currentFieldName); } else if (token == XContentParser.Token.VALUE_NULL) { - parseNullValue(context, mapper, currentFieldName, paths); + parseNullValue(context, mapper, currentFieldName); } else if (token == null) { throw new MapperParsingException( "object mapping for [" @@ -499,9 +477,9 @@ private static void innerParseObject( + "] as object, but got EOF, has a concrete value been provided to it?" ); } else if (token.isValue()) { - parseValue(context, mapper, currentFieldName, token, paths); + parseValue(context, mapper, currentFieldName, token); } - token = parser.nextToken(); + token = context.parser().nextToken(); } } @@ -577,7 +555,8 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr parseCopyFields(context, copyToFields); } } else if (mapper instanceof FieldAliasMapper) { - throw new IllegalArgumentException("Cannot write to a field alias [" + mapper.name() + "]."); + String verb = context.isWithinCopyTo() ? "copy" : "write"; + throw new MapperParsingException("Cannot " + verb + " to a field alias [" + mapper.name() + "]."); } else { throw new IllegalStateException( "The provided mapper [" + mapper.name() + "] has an unrecognized type [" + mapper.getClass().getSimpleName() + "]." 
@@ -585,23 +564,19 @@ static void parseObjectOrField(DocumentParserContext context, Mapper mapper) thr } } - private static void parseObject(final DocumentParserContext context, ObjectMapper mapper, String currentFieldName, String[] paths) - throws IOException { + private static void parseObject(final DocumentParserContext context, ObjectMapper mapper, String currentFieldName) throws IOException { assert currentFieldName != null; - Mapper objectMapper = getMapper(context, mapper, currentFieldName, paths); + Mapper objectMapper = getMapper(context, mapper, currentFieldName); if (objectMapper != null) { context.path().add(currentFieldName); parseObjectOrField(context, objectMapper); context.path().remove(); } else { - currentFieldName = paths[paths.length - 1]; - Tuple parentMapperTuple = getDynamicParentMapper(context, paths, mapper); - ObjectMapper parentMapper = parentMapperTuple.v2(); - ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); + ObjectMapper.Dynamic dynamic = dynamicOrDefault(mapper, context); if (dynamic == ObjectMapper.Dynamic.STRICT) { throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName); } else if (dynamic == ObjectMapper.Dynamic.FALSE) { - failIfMatchesRoutingPath(context, parentMapper, currentFieldName); + failIfMatchesRoutingPath(context, mapper, currentFieldName); // not dynamic, read everything up to end object context.parser().skipChildren(); } else { @@ -614,21 +589,20 @@ private static void parseObject(final DocumentParserContext context, ObjectMappe dynamicObjectMapper = dynamic.getDynamicFieldsBuilder().createDynamicObjectMapper(context, currentFieldName); context.addDynamicMapper(dynamicObjectMapper); } + if (dynamicObjectMapper instanceof NestedObjectMapper && context.isWithinCopyTo()) { + throw new MapperParsingException( + "It is forbidden to create dynamic nested objects ([" + dynamicObjectMapper.name() + "]) through `copy_to`" + ); + } context.path().add(currentFieldName); parseObjectOrField(context, dynamicObjectMapper); context.path().remove(); } - for (int i = 0; i < parentMapperTuple.v1(); i++) { - context.path().remove(); - } } } - private static void parseArray(DocumentParserContext context, ObjectMapper parentMapper, String lastFieldName, String[] paths) - throws IOException { - String arrayFieldName = lastFieldName; - - Mapper mapper = getLeafMapper(context, parentMapper, lastFieldName, paths); + private static void parseArray(DocumentParserContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { + Mapper mapper = getLeafMapper(context, parentMapper, lastFieldName); if (mapper != null) { // There is a concrete mapper for this field already. 
Need to check if the mapper // expects an array, if so we pass the context straight to the mapper and if not @@ -636,38 +610,31 @@ private static void parseArray(DocumentParserContext context, ObjectMapper paren if (parsesArrayValue(mapper)) { parseObjectOrField(context, mapper); } else { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, lastFieldName); } } else { - arrayFieldName = paths[paths.length - 1]; - lastFieldName = arrayFieldName; - Tuple parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper); - parentMapper = parentMapperTuple.v2(); ObjectMapper.Dynamic dynamic = dynamicOrDefault(parentMapper, context); if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); + throw new StrictDynamicMappingException(parentMapper.fullPath(), lastFieldName); } else if (dynamic == ObjectMapper.Dynamic.FALSE) { // TODO: shouldn't this skip, not parse? - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, lastFieldName); } else { - Mapper objectMapperFromTemplate = dynamic.getDynamicFieldsBuilder().createObjectMapperFromTemplate(context, arrayFieldName); + Mapper objectMapperFromTemplate = dynamic.getDynamicFieldsBuilder().createObjectMapperFromTemplate(context, lastFieldName); if (objectMapperFromTemplate == null) { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, lastFieldName); } else { if (parsesArrayValue(objectMapperFromTemplate)) { context.addDynamicMapper(objectMapperFromTemplate); - context.path().add(arrayFieldName); + context.path().add(lastFieldName); parseObjectOrField(context, objectMapperFromTemplate); context.path().remove(); } else { - parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + parseNonDynamicArray(context, parentMapper, lastFieldName, lastFieldName); } } } - for (int i = 0; i < parentMapperTuple.v1(); i++) { - context.path().remove(); - } } } @@ -683,14 +650,14 @@ private static void parseNonDynamicArray( ) throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; - final String[] paths = splitAndValidatePath(lastFieldName); + splitAndValidatePath(lastFieldName); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.START_OBJECT) { - parseObject(context, mapper, lastFieldName, paths); + parseObject(context, mapper, lastFieldName); } else if (token == XContentParser.Token.START_ARRAY) { - parseArray(context, mapper, lastFieldName, paths); + parseArray(context, mapper, lastFieldName); } else if (token == XContentParser.Token.VALUE_NULL) { - parseNullValue(context, mapper, lastFieldName, paths); + parseNullValue(context, mapper, lastFieldName); } else if (token == null) { throw new MapperParsingException( "object mapping for [" @@ -701,7 +668,7 @@ private static void parseNonDynamicArray( ); } else { assert token.isValue(); - parseValue(context, mapper, lastFieldName, token, paths); + parseValue(context, mapper, lastFieldName, token); } } } @@ -710,8 +677,7 @@ private static void parseValue( final DocumentParserContext context, ObjectMapper parentMapper, String currentFieldName, - XContentParser.Token token, - String[] paths + XContentParser.Token token ) throws IOException { if (currentFieldName == null) { throw new 
MapperParsingException( @@ -723,24 +689,17 @@ + "]" ); } - Mapper mapper = getLeafMapper(context, parentMapper, currentFieldName, paths); + Mapper mapper = getLeafMapper(context, parentMapper, currentFieldName); if (mapper != null) { parseObjectOrField(context, mapper); } else { - currentFieldName = paths[paths.length - 1]; - Tuple<Integer, ObjectMapper> parentMapperTuple = getDynamicParentMapper(context, paths, parentMapper); - parentMapper = parentMapperTuple.v2(); parseDynamicValue(context, parentMapper, currentFieldName, token); - for (int i = 0; i < parentMapperTuple.v1(); i++) { - context.path().remove(); - } } } - private static void parseNullValue(DocumentParserContext context, ObjectMapper parentMapper, String lastFieldName, String[] paths) - throws IOException { + private static void parseNullValue(DocumentParserContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { // we can only handle null values if we have mappings for them - Mapper mapper = getLeafMapper(context, parentMapper, lastFieldName, paths); + Mapper mapper = getLeafMapper(context, parentMapper, lastFieldName); if (mapper != null) { // TODO: passing null to an object seems bogus? parseObjectOrField(context, mapper); @@ -782,7 +741,6 @@ private static void failIfMatchesRoutingPath(DocumentParserContext context, Obje * Creates instances of the fields that the current field should be copied to */ private static void parseCopyFields(DocumentParserContext context, List<String> copyToFields) throws IOException { - context = context.createCopyToContext(); for (String field : copyToFields) { // In case of a hierarchy of nested documents, we need to figure out // which document the field should go to @@ -794,112 +752,11 @@ private static void parseCopyFields(DocumentParserContext context, List } } assert targetDoc != null; - final DocumentParserContext copyToContext; - if (targetDoc == context.doc()) { - copyToContext = context; - } else { - copyToContext = context.switchDoc(targetDoc); - } - parseCopy(field, copyToContext); + final DocumentParserContext copyToContext = context.createCopyToContext(field, targetDoc); + innerParseObject(copyToContext, context.root()); } } - /** - * Creates an copy of the current field with given field name and boost - */ - private static void parseCopy(String field, DocumentParserContext context) throws IOException { - Mapper mapper = context.mappingLookup().getMapper(field); - if (mapper != null) { - if (mapper instanceof FieldMapper) { - ((FieldMapper) mapper).parse(context); - } else if (mapper instanceof FieldAliasMapper) { - throw new IllegalArgumentException("Cannot copy to a field alias [" + mapper.name() + "]."); - } else { - throw new IllegalStateException( - "The provided mapper [" + mapper.name() + "] has an unrecognized type [" + mapper.getClass().getSimpleName() + "]."
- ); - } - } else { - // The path of the dest field might be completely different from the current one so we need to reset it - context = context.overridePath(new ContentPath(0)); - - final String[] paths = splitAndValidatePath(field); - final String fieldName = paths[paths.length - 1]; - Tuple parentMapperTuple = getDynamicParentMapper(context, paths, null); - ObjectMapper objectMapper = parentMapperTuple.v2(); - parseDynamicValue(context, objectMapper, fieldName, context.parser().currentToken()); - for (int i = 0; i < parentMapperTuple.v1(); i++) { - context.path().remove(); - } - } - } - - private static Tuple getDynamicParentMapper( - DocumentParserContext context, - final String[] paths, - ObjectMapper currentParent - ) { - ObjectMapper mapper = currentParent == null ? context.root() : currentParent; - int pathsAdded = 0; - ObjectMapper parent = mapper; - for (int i = 0; i < paths.length - 1; i++) { - String name = paths[i]; - String currentPath = context.path().pathAsText(name); - Mapper existingFieldMapper = context.mappingLookup().getMapper(currentPath); - if (existingFieldMapper != null) { - throw new MapperParsingException( - "Could not dynamically add mapping for field [{}]. Existing mapping for [{}] must be of type object but found [{}].", - null, - String.join(".", paths), - currentPath, - existingFieldMapper.typeName() - ); - } - mapper = context.mappingLookup().objectMappers().get(currentPath); - if (mapper == null) { - // One mapping is missing, check if we are allowed to create a dynamic one. - ObjectMapper.Dynamic dynamic = dynamicOrDefault(parent, context); - if (dynamic == ObjectMapper.Dynamic.STRICT) { - throw new StrictDynamicMappingException(parent.fullPath(), name); - } else if (dynamic == ObjectMapper.Dynamic.FALSE) { - // Should not dynamically create any more mappers so return the last mapper - return new Tuple<>(pathsAdded, parent); - } else if (dynamic == ObjectMapper.Dynamic.RUNTIME) { - mapper = new NoOpObjectMapper(name, currentPath); - } else { - final Mapper fieldMapper = dynamic.getDynamicFieldsBuilder().createDynamicObjectMapper(context, name); - if (fieldMapper instanceof ObjectMapper == false) { - assert context.sourceToParse().dynamicTemplates().containsKey(currentPath) - : "dynamic templates [" + context.sourceToParse().dynamicTemplates() + "]"; - throw new MapperParsingException( - "Field [" - + currentPath - + "] must be an object; " - + "but it's configured as [" - + fieldMapper.typeName() - + "] in dynamic template [" - + context.sourceToParse().dynamicTemplates().get(currentPath) - + "]" - ); - } - mapper = (ObjectMapper) fieldMapper; - if (mapper.isNested()) { - throw new MapperParsingException( - "It is forbidden to create dynamic nested objects ([" - + currentPath - + "]) through `copy_to` or dots in field names" - ); - } - context.addDynamicMapper(mapper); - } - } - context.path().add(paths[i]); - pathsAdded++; - parent = mapper; - } - return new Tuple<>(pathsAdded, mapper); - } - // find what the dynamic setting is given the current parse context and parent private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper, DocumentParserContext context) { ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); @@ -927,48 +784,25 @@ private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper parentMapper, return dynamic; } - // looks up a child mapper, but takes into account field names that expand to objects + // looks up a child mapper // returns null if no such child mapper exists - note that unlike getLeafMapper, // we 
do not check for shadowing runtime fields because they only apply to leaf // fields - private static Mapper getMapper(final DocumentParserContext context, ObjectMapper objectMapper, String fieldName, String[] subfields) { + private static Mapper getMapper(final DocumentParserContext context, ObjectMapper objectMapper, String fieldName) { String fieldPath = context.path().pathAsText(fieldName); // Check if mapper is a metadata mapper first Mapper mapper = context.getMetadataMapper(fieldPath); if (mapper != null) { return mapper; } - - for (int i = 0; i < subfields.length - 1; ++i) { - mapper = objectMapper.getMapper(subfields[i]); - if (mapper instanceof ObjectMapper == false) { - return null; - } - objectMapper = (ObjectMapper) mapper; - if (objectMapper.isNested()) { - throw new MapperParsingException( - "Cannot add a value for field [" - + fieldName - + "] since one of the intermediate objects is mapped as a nested object: [" - + mapper.name() - + "]" - ); - } - } - String leafName = subfields[subfields.length - 1]; - return objectMapper.getMapper(leafName); + return objectMapper.getMapper(fieldName); } // looks up a child mapper, taking into account field names that expand to objects // if no mapper is found, checks to see if a runtime field with the specified // field name exists and if so returns a no-op mapper to prevent indexing - private static Mapper getLeafMapper( - final DocumentParserContext context, - ObjectMapper objectMapper, - String fieldName, - String[] subfields - ) { - Mapper mapper = getMapper(context, objectMapper, fieldName, subfields); + private static Mapper getLeafMapper(final DocumentParserContext context, ObjectMapper objectMapper, String fieldName) { + Mapper mapper = getMapper(context, objectMapper, fieldName); if (mapper != null) { return mapper; } @@ -1086,9 +920,9 @@ private static class InternalDocumentParserContext extends DocumentParserContext Function parserContext, SourceToParse source, XContentParser parser - ) { + ) throws IOException { super(mappingLookup, indexSettings, indexAnalyzers, parserContext, source); - this.parser = parser; + this.parser = DotExpandingXContentParser.expandDots(parser); this.document = new LuceneDocument(); this.documents.add(document); this.maxAllowedNumNestedDocs = indexSettings().getMappingNestedDocsLimit(); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 50a0dd4eaac1f..1dd7de4167da0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -12,8 +12,11 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.xcontent.DotExpandingXContentParser; +import org.elasticsearch.xcontent.FilterXContentParser; import org.elasticsearch.xcontent.XContentParser; +import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -247,18 +250,6 @@ public final List getDynamicRuntimeFields() { */ public abstract Iterable nonRootDocuments(); - /** - * Return a new context that will be within a copy-to operation. 
- */ - public final DocumentParserContext createCopyToContext() { - return new Wrapper(this) { - @Override - public boolean isWithinCopyTo() { - return true; - } - }; - } - public boolean isWithinCopyTo() { return false; } @@ -267,6 +258,10 @@ public boolean isWithinCopyTo() { * Return a new context that will be used within a nested document. */ public final DocumentParserContext createNestedContext(String fullPath) { + if (isWithinCopyTo()) { + // nested context will already have been set up for copy_to fields + return this; + } final LuceneDocument doc = new LuceneDocument(fullPath, doc()); addDoc(doc); return switchDoc(doc); @@ -285,20 +280,42 @@ public LuceneDocument doc() { } /** - * Return a new context that will have the provided path. + * Return a context for copy_to directives + * @param copyToField the name of the field to copy to + * @param doc the document to target */ - public final DocumentParserContext overridePath(final ContentPath path) { + public final DocumentParserContext createCopyToContext(String copyToField, LuceneDocument doc) throws IOException { + ContentPath path = new ContentPath(0); + XContentParser parser = DotExpandingXContentParser.expandDots(new CopyToParser(copyToField, parser())); return new Wrapper(this) { @Override public ContentPath path() { return path; } + + @Override + public XContentParser parser() { + return parser; + } + + @Override + public boolean isWithinCopyTo() { + return true; + } + + @Override + public LuceneDocument doc() { + return doc; + } }; } /** - * @deprecated we are actively deprecating and removing the ability to pass - * complex objects to multifields, so try and avoid using this method + * @deprecated we are actively deprecating and removing the ability to pass + * complex objects to multifields, so try and avoid using this method + * Replace the XContentParser used by this context + * @param parser the replacement parser + * @return a new context with a replaced parser */ @Deprecated public final DocumentParserContext switchParser(XContentParser parser) { @@ -343,4 +360,45 @@ public final DynamicTemplate findDynamicTemplate(String fieldName, DynamicTempla } return null; } + + // XContentParser that wraps an existing parser positioned on a value, + // and a field name, and returns a stream that looks like { 'field' : 'value' } + private static class CopyToParser extends FilterXContentParser { + + enum State { + FIELD, + VALUE + } + + private State state = State.FIELD; + private final String field; + + CopyToParser(String fieldName, XContentParser in) { + super(in); + this.field = fieldName; + assert in.currentToken().isValue() || in.currentToken() == Token.VALUE_NULL; + } + + @Override + public Token nextToken() throws IOException { + if (state == State.FIELD) { + state = State.VALUE; + return in.currentToken(); + } + return Token.END_OBJECT; + } + + @Override + public Token currentToken() { + if (state == State.FIELD) { + return Token.FIELD_NAME; + } + return in.currentToken(); + } + + @Override + public String currentName() throws IOException { + return field; + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index b663ab5c5c659..a4bfe18814b7e 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -1384,6 +1384,9 @@ private static Number value(XContentParser parser, NumberType numberType, 
Number if (coerce && parser.currentToken() == Token.VALUE_STRING && parser.textLength() == 0) { return nullValue; } + if (parser.currentToken() == Token.START_OBJECT) { + throw new IllegalArgumentException("Cannot parse object as number"); + } return numberType.parse(parser, coerce); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java index 82c9337a45690..6c7ffe1aee22f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocumentParserTests.java @@ -315,32 +315,32 @@ public void testFieldDisabled() throws Exception { public void testDotsWithFieldDisabled() throws IOException { DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("enabled", false))); { - ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", 111))); + ParsedDocument doc = mapper.parse(source(b -> { + b.field("field.bar", "string value"); + b.field("blub", 222); + })); assertNull(doc.rootDoc().getField("field")); assertNull(doc.rootDoc().getField("bar")); assertNull(doc.rootDoc().getField("field.bar")); + assertNotNull(doc.rootDoc().getField("blub")); } { - ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", new int[] { 1, 2, 3 }))); + ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", 111))); assertNull(doc.rootDoc().getField("field")); assertNull(doc.rootDoc().getField("bar")); assertNull(doc.rootDoc().getField("field.bar")); } { - ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", Collections.singletonMap("key", "value")))); + ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", new int[] { 1, 2, 3 }))); assertNull(doc.rootDoc().getField("field")); assertNull(doc.rootDoc().getField("bar")); assertNull(doc.rootDoc().getField("field.bar")); } { - ParsedDocument doc = mapper.parse(source(b -> { - b.field("field.bar", "string value"); - b.field("blub", 222); - })); + ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", Collections.singletonMap("key", "value")))); assertNull(doc.rootDoc().getField("field")); assertNull(doc.rootDoc().getField("bar")); assertNull(doc.rootDoc().getField("field.bar")); - assertNotNull(doc.rootDoc().getField("blub")); } } @@ -400,11 +400,8 @@ public void testDotsWithExistingNestedMapper() throws Exception { b.endObject(); })); - MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field.bar", 123)))); - assertEquals( - "Cannot add a value for field [field.bar] since one of the intermediate objects is mapped as a nested object: [field]", - e.getMessage() - ); + ParsedDocument doc = mapper.parse(source(b -> b.field("field.bar", 123))); + assertEquals(123, doc.docs().get(0).getNumericValue("field.bar")); } public void testUnexpectedFieldMappingType() throws Exception { @@ -440,8 +437,8 @@ public void testDotsWithDynamicNestedMapper() throws Exception { b.endArray(); })); - MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.field("foo.bar", 42)))); - assertEquals("It is forbidden to create dynamic nested objects ([foo]) through `copy_to` or dots in field names", e.getMessage()); + ParsedDocument doc = mapper.parse(source(b -> b.field("foo.bar", 42))); + assertEquals(42L, doc.docs().get(0).getNumericValue("foo.bar")); } public void testNestedHaveIdAndTypeFields() throws Exception { 
@@ -1199,10 +1196,7 @@ public void testWrongTypeDynamicTemplate() throws Exception { MapperParsingException.class, () -> mapper.parse(source("1", b -> b.field(field, "true"), null, Map.of("foo", "booleans"))) ); - assertThat( - error.getMessage(), - containsString("Field [foo] must be an object; but it's configured as [boolean] in dynamic template [booleans]") - ); + assertThat(error.getMessage(), containsString("failed to parse field [foo] of type [boolean]")); ParsedDocument doc = mapper.parse(source("1", b -> b.field(field, "true"), null, Map.of(field, "booleans"))); IndexableField[] fields = doc.rootDoc().getFields(field); @@ -1232,11 +1226,7 @@ public void testDynamicDottedFieldNameLongArrayWithExistingParentWrongType() thr MapperParsingException.class, () -> mapper.parse(source(b -> b.startArray("field.bar.baz").value(0).value(1).endArray())) ); - assertEquals( - "Could not dynamically add mapping for field [field.bar.baz]. " - + "Existing mapping for [field] must be of type object but found [long].", - exception.getMessage() - ); + assertThat(exception.getMessage(), containsString("failed to parse field [field] of type [long]")); } public void testDynamicFalseDottedFieldNameLongArray() throws Exception { @@ -1321,11 +1311,7 @@ public void testDynamicDottedFieldNameLongWithExistingParentWrongType() throws E MapperParsingException.class, () -> mapper.parse(source(b -> b.field("field.bar.baz", 0))) ); - assertEquals( - "Could not dynamically add mapping for field [field.bar.baz]. " - + "Existing mapping for [field] must be of type object but found [long].", - exception.getMessage() - ); + assertThat(exception.getMessage(), containsString("failed to parse field [field] of type [long]")); } public void testDynamicFalseDottedFieldNameLong() throws Exception { @@ -1420,11 +1406,7 @@ public void testDynamicDottedFieldNameObjectWithExistingParentWrongType() throws MapperParsingException.class, () -> mapper.parse(source(b -> b.startObject("field.bar.baz").field("a", 0).endObject())) ); - assertEquals( - "Could not dynamically add mapping for field [field.bar.baz]. " - + "Existing mapping for [field] must be of type object but found [long].", - exception.getMessage() - ); + assertThat(exception.getMessage(), containsString("failed to parse field [field] of type [long]")); } public void testDynamicFalseDottedFieldNameObject() throws Exception { @@ -1785,22 +1767,6 @@ public void testDynamicDateDetectionEnabledWithNoSpecialCharacters() throws IOEx public void testDynamicFieldsStartingAndEndingWithDot() throws Exception { MapperService mapperService = createMapperService(mapping(b -> {})); - merge(mapperService, dynamicMapping(mapperService.documentMapper().parse(source(b -> { - b.startArray("top."); - { - b.startObject(); - { - b.startArray("foo."); - { - b.startObject().field("thing", "bah").endObject(); - } - b.endArray(); - } - b.endObject(); - } - b.endArray(); - })).dynamicMappingsUpdate())); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> mapperService.documentMapper().parse(source(b -> { b.startArray("top."); { @@ -1827,7 +1793,7 @@ public void testDynamicFieldsStartingAndEndingWithDot() throws Exception { assertThat( e.getMessage(), - containsString("object field starting or ending with a [.] makes object resolution ambiguous: [top..foo..bar]") + containsString("object field starting or ending with a [.] 
makes object resolution ambiguous: [top..foo.]") ); } @@ -1835,7 +1801,7 @@ public void testDynamicFieldsEmptyName() throws Exception { DocumentMapper mapper = createDocumentMapper(mapping(b -> {})); IllegalArgumentException emptyFieldNameException = expectThrows(IllegalArgumentException.class, () -> mapper.parse(source(b -> { - b.startArray("top."); + b.startArray("top"); { b.startObject(); { @@ -1889,7 +1855,7 @@ public void testWriteToFieldAlias() throws Exception { () -> mapper.parse(source(b -> b.field("alias-field", "value"))) ); - assertEquals("Cannot write to a field alias [alias-field].", exception.getCause().getMessage()); + assertEquals("Cannot write to a field alias [alias-field].", exception.getMessage()); } public void testCopyToFieldAlias() throws Exception { @@ -1914,7 +1880,7 @@ public void testCopyToFieldAlias() throws Exception { () -> mapper.parse(source(b -> b.field("text-field", "value"))) ); - assertEquals("Cannot copy to a field alias [alias-field].", exception.getCause().getMessage()); + assertEquals("Cannot copy to a field alias [alias-field].", exception.getMessage()); } public void testDynamicDottedFieldNameWithFieldAlias() throws Exception { @@ -1933,11 +1899,7 @@ public void testDynamicDottedFieldNameWithFieldAlias() throws Exception { () -> mapper.parse(source(b -> b.startObject("alias-field.dynamic-field").field("type", "keyword").endObject())) ); - assertEquals( - "Could not dynamically add mapping for field [alias-field.dynamic-field]. " - + "Existing mapping for [alias-field] must be of type object but found [alias].", - exception.getMessage() - ); + assertEquals("Cannot write to a field alias [alias-field].", exception.getMessage()); } public void testMultifieldOverwriteFails() throws Exception { @@ -1962,12 +1924,7 @@ public void testMultifieldOverwriteFails() throws Exception { MapperParsingException.class, () -> mapper.parse(source(b -> b.field("message", "original").field("message.text", "overwrite"))) ); - - assertEquals( - "Could not dynamically add mapping for field [message.text]. 
" - + "Existing mapping for [message] must be of type object but found [keyword].", - exception.getMessage() - ); + assertThat(exception.getMessage(), containsString("failed to parse field [message] of type [keyword]")); } public void testTypeless() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index 01e5b3b375a12..2c1bd847b394a 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -199,8 +199,7 @@ public void testIgnoreMalformedWithObject() throws Exception { b.field("ignore_malformed", ignoreMalformed); })); MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse(malformed)); - assertThat(e.getCause().getMessage(), containsString("Current token")); - assertThat(e.getCause().getMessage(), containsString("not numeric, can not use numeric value accessors")); + assertThat(e.getCause().getMessage(), containsString("Cannot parse object as number")); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java index 76c98f6cadbfe..73a9f2374c2f5 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/flattened/FlattenedFieldMapperTests.java @@ -399,4 +399,26 @@ protected Object generateRandomInputValue(MappedFieldType ft) { assumeFalse("Test implemented in a follow up", true); return null; } + + public void testDynamicTemplateAndDottedPaths() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> { + b.startArray("dynamic_templates"); + b.startObject(); + b.startObject("no_deep_objects"); + b.field("path_match", "*.*.*"); + b.field("match_mapping_type", "object"); + b.startObject("mapping"); + b.field("type", "flattened"); + b.endObject(); + b.endObject(); + b.endObject(); + b.endArray(); + })); + + ParsedDocument doc = mapper.parse(source(b -> b.field("a.b.c.d", "value"))); + IndexableField[] fields = doc.rootDoc().getFields("a.b.c"); + assertEquals(new BytesRef("value"), fields[0].binaryValue()); + IndexableField[] keyed = doc.rootDoc().getFields("a.b.c._keyed"); + assertEquals(new BytesRef("d\0value"), keyed[0].binaryValue()); + } } From 49feb651a399755e2109b27fb485cbca22eac09c Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 23 Nov 2021 16:55:47 +0000 Subject: [PATCH 23/88] Remove unused ConnectTransportException#node (#80944) `ConnectTransportException#node` is only used in a couple of assertions in tests, but those assertions are either unnecessary or can be rewritten without it so this field is effectively unused. This commit removes it. 
--- .../transport/ConnectTransportException.java | 16 +++++++--------- .../ExceptionSerializationTests.java | 2 -- .../AbstractSimpleTransportTestCase.java | 5 +++-- 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index 08ced6736aa76..aa480d0c4ad0e 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -16,8 +17,6 @@ public class ConnectTransportException extends ActionTransportException { - private final DiscoveryNode node; - public ConnectTransportException(DiscoveryNode node, String msg) { this(node, msg, null, null); } @@ -32,21 +31,20 @@ public ConnectTransportException(DiscoveryNode node, String msg, Throwable cause public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) { super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, msg, cause); - this.node = node; } public ConnectTransportException(StreamInput in) throws IOException { super(in); - node = in.readOptionalWriteable(DiscoveryNode::new); + if (in.getVersion().before(Version.V_8_1_0)) { + in.readOptionalWriteable(DiscoveryNode::new); + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeOptionalWriteable(node); - } - - public DiscoveryNode node() { - return node; + if (out.getVersion().before(Version.V_8_1_0)) { + out.writeBoolean(false); // optional & unused node field + } } } diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index b86a55c7c9630..be59a20167e13 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -410,12 +410,10 @@ public void testConnectTransportException() throws IOException { DiscoveryNode node = new DiscoveryNode("thenode", transportAddress, emptyMap(), emptySet(), Version.CURRENT); ConnectTransportException ex = serialize(new ConnectTransportException(node, "msg", "action", null)); assertEquals("[][" + transportAddress + "][action] msg", ex.getMessage()); - assertEquals(node, ex.node()); assertNull(ex.getCause()); ex = serialize(new ConnectTransportException(node, "msg", "action", new NullPointerException())); assertEquals("[][" + transportAddress + "][action] msg", ex.getMessage()); - assertEquals(node, ex.node()); assertTrue(ex.getCause() instanceof NullPointerException); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 27f303a04cee0..81b60bb18acc1 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -87,6 +87,7 @@ import static java.util.Collections.emptyMap; import static 
java.util.Collections.emptySet; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -1721,7 +1722,7 @@ public void handleResponse(StringMessageResponse response) { public void handleException(TransportException exp) { Throwable cause = ExceptionsHelper.unwrapCause(exp); assertThat(cause, instanceOf(ConnectTransportException.class)); - assertThat(((ConnectTransportException) cause).node(), equalTo(nodeA)); + assertThat(cause.getMessage(), allOf(containsString(nodeA.getName()), containsString(nodeA.getAddress().toString()))); } } ); @@ -1729,7 +1730,7 @@ public void handleException(TransportException exp) { final ExecutionException e = expectThrows(ExecutionException.class, res::get); Throwable cause = ExceptionsHelper.unwrapCause(e.getCause()); assertThat(cause, instanceOf(ConnectTransportException.class)); - assertThat(((ConnectTransportException) cause).node(), equalTo(nodeA)); + assertThat(cause.getMessage(), allOf(containsString(nodeA.getName()), containsString(nodeA.getAddress().toString()))); // wait for the transport to process the sending failure and disconnect from node assertBusy(() -> assertFalse(serviceB.nodeConnected(nodeA))); From 95c2c73ab276a1a7a6908bc3d4efe3a575a66eb7 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Tue, 23 Nov 2021 17:41:35 +0000 Subject: [PATCH 24/88] [ML] Removing temporary debug (#80956) This log line was supposed to be removed before committing the PR it was added in. --- .../main/java/org/elasticsearch/xpack/ml/MachineLearning.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index f2b512eb5361f..279a17ea34467 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -650,7 +650,6 @@ protected XPackLicenseState getLicenseState() { } public static boolean isMlNode(DiscoveryNode node) { - logger.info("DMR node roles are " + node.getRoles()); return node.getRoles().contains(DiscoveryNodeRole.ML_ROLE); } From a6d58998164513c199b0dffc2ccf5f24af215ba8 Mon Sep 17 00:00:00 2001 From: weizijun Date: Wed, 24 Nov 2021 01:52:33 +0800 Subject: [PATCH 25/88] TSDB: move TimeSeriesModeIT to yaml tests (#80933) I move TimeSeriesModeIT to yaml tests. 
I move the following tests to 15_timestamp_mapping.yml: ``` testTimestampMillis -> date testTimestampNanos -> date_nanos testAddsTimestamp -> automatically add with date testBadTimestamp -> reject @timestamp with wrong type testDisabledTimeStampMapper -> reject timestamp meta field with wrong type ``` And I add some new tests to 15_timestamp_mapping.yml: ``` testBadTimeStampMapper -> reject bad timestamp meta field testWithoutTimestamp -> write without timestamp testAddTimeStampMeta -> add timestamp meta testEnabledTimeStampMapper -> enable timestamp meta field ``` And I move the following test to 10_settings.yml: ``` testEnableTimestampRange -> set start_time and end_time ``` And I add some new tests to 10_settings.yml: ``` testBadStartTime/testBadEndTime -> set bad start_time and end_time ``` --- .../rest-api-spec/test/tsdb/10_settings.yml | 62 ++ .../test/tsdb/15_timestamp_mapping.yml | 137 +++- .../elasticsearch/index/TimeSeriesModeIT.java | 600 ------------------ 3 files changed, 198 insertions(+), 601 deletions(-) delete mode 100644 server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index a1ac1f363d19b..709f633e74820 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -184,6 +184,11 @@ set start_time and end_time: time_series: start_time: 1632625782000 end_time: 1632625792000 + mappings: + properties: + metricset: + type: keyword + time_series_dimension: true - do: indices.put_settings: @@ -202,6 +207,23 @@ set start_time and end_time: time_series: end_time: 1632625792000 + - do: + index: + refresh: true + index: test_index + body: { + "@timestamp": 1632625792000, + "metricset": "pod" + } + + - do: + search: + index: test_index + body: + docvalue_fields: [ '@timestamp' ] + - match: { hits.total.value: 1 } + - match: { "hits.hits.0.fields.@timestamp": [ "2021-09-26T03:09:52.000Z" ] } + - do: indices.delete: index: test_index @@ -231,6 +253,46 @@ set start_time and end_time without timeseries mode: time_series: end_time: 1632625782000 +--- +set bad start_time and end_time: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + - do: + indices.create: + index: test_index + body: + settings: + index: + mode: time_series + routing_path: [ metricset ] + time_series: + start_time: 1632625782000 + end_time: 1632625792000 + mappings: + properties: + metricset: + type: keyword + time_series_dimension: true + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:41Z\] must be larger than 2021-09-26T03:09:42Z/ + index: + index: test_index + body: { + "@timestamp": 1632625781000, + "metricset": "pod" + } + + - do: + catch: /time series index @timestamp value \[2021-09-26T03:09:53Z\] must be smaller than 2021-09-26T03:09:52Z/ + index: + index: test_index + body: { + "@timestamp": 1632625793000, + "metricset": "pod" + } + --- check start_time and end_time with data_nano: - skip: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml index 44d0d42b804bf..5c15da33012a5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml +++
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/15_timestamp_mapping.yml @@ -161,7 +161,7 @@ reject @timestamp with wrong type: reject timestamp meta field with wrong type: - skip: version: " - 8.0.99" - reason: introduced in 8.0.0 to be backported to 7.16.0 + reason: introduced in 8.1.0 - do: catch: /\[_data_stream_timestamp\] meta field has been disabled/ @@ -177,3 +177,138 @@ reject timestamp meta field with wrong type: mappings: _data_stream_timestamp: enabled: false + +--- +enable timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + _data_stream_timestamp: + enabled: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + +--- +reject bad timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + catch: /\[_data_stream_timestamp\] config must be an object/ + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + _data_stream_timestamp: enabled + +--- +write without timestamp: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } + + - do: + catch: /data stream timestamp field \[@timestamp\] is missing/ + index: + index: test + body: + "metricset": "pod" + +--- +explicitly enable timestamp meta field: + - skip: + version: " - 8.0.99" + reason: introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + mode: time_series + routing_path: [metricset] + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + meta: + field_meta: time_series + metricset: + type: keyword + time_series_dimension: true + + - do: + index: + refresh: true + index: test + body: { + "@timestamp": 1632625793000, + "metricset": "pod", + "new_field" : "value" + } + + - do: + search: + index: test + body: + docvalue_fields: [ '@timestamp', 'new_field.keyword' ] + - match: { hits.total.value: 1 } + - match: { hits.hits.0.fields.@timestamp: [ "2021-09-26T03:09:53.000Z" ] } + - match: { hits.hits.0.fields.new_field\.keyword: [ "value" ] } + + - do: + indices.get_mapping: + index: test + - match: { "test.mappings.properties.@timestamp.type": date } + - match: { "test.mappings.properties.@timestamp.meta.field_meta": time_series } + - match: { 'test.mappings._data_stream_timestamp.enabled': true } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java deleted file mode 100644 index 657772a4d3e58..0000000000000 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/TimeSeriesModeIT.java +++ /dev/null @@ -1,600 +0,0 @@ -/* - * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.index; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.DocWriteResponse.Result; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; - -import java.io.IOException; -import java.util.Locale; -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -public class TimeSeriesModeIT extends ESIntegTestCase { - public void testDisabledTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject(DataStreamTimestampFieldMapper.NAME) - .field("enabled", false) - .endObject() - .endObject() - .endObject(); - - Exception e = expectThrows(IllegalStateException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); - assertThat(e.getMessage(), equalTo("[_data_stream_timestamp] meta field has been disabled")); - } - - public void testBadTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .field(DataStreamTimestampFieldMapper.NAME, "enabled") - .endObject() - .endObject(); - - Exception e = expectThrows(MapperParsingException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); - assertThat(e.getMessage(), equalTo("Failed to parse mapping: [_data_stream_timestamp] config must be an object")); - } - - public void testBadTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - String type = randomFrom("keyword", "integer", "long", "double", "text"); - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", type); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - Exception e = expectThrows(IllegalArgumentException.class, () -> prepareCreate("test").setSettings(s).setMapping(mappings).get()); - assertThat( - e.getMessage(), - equalTo("data stream timestamp field [@timestamp] is of type [" + type + "], but 
[date,date_nanos] is expected") - ); - } - - public void testAddsTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).get(); - ensureGreen(index); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); - assertThat(getMappingsResponse.getMappings().size(), equalTo(1)); - - XContentBuilder expect = XContentFactory.jsonBuilder(); - expect.startObject(); - { - expect.startObject("_doc"); - { - expect.startObject(DataStreamTimestampFieldMapper.NAME); - { - expect.field("enabled", true); - } - expect.endObject(); - expect.startObject("properties"); - { - expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - expect.field("type", "date"); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); - } - - public void testTimestampMillis() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", "date"); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); - XContentBuilder expect = XContentFactory.jsonBuilder(); - expect.startObject(); - { - expect.startObject("_doc"); - { - expect.startObject(DataStreamTimestampFieldMapper.NAME); - { - expect.field("enabled", true); - } - expect.endObject(); - expect.startObject("properties"); - { - expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - expect.field("type", "date"); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); - } - - public void testTimestampNanos() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", "date_nanos"); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); - XContentBuilder expect = 
XContentFactory.jsonBuilder(); - expect.startObject(); - { - expect.startObject("_doc"); - { - expect.startObject(DataStreamTimestampFieldMapper.NAME); - { - expect.field("enabled", true); - } - expect.endObject(); - expect.startObject("properties"); - { - expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - expect.field("type", "date_nanos"); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); - } - - public void testWithoutTimestamp() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", "date"); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> index(index, XContentFactory.jsonBuilder().startObject().field("foo", "bar").endObject()) - ); - assertThat(e.getRootCause().getMessage(), containsString("data stream timestamp field [@timestamp] is missing")); - } - - public void testEnableTimestampRange() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", randomBoolean() ? 
"date" : "date_nanos"); - } - mappings.endObject(); - mappings.startObject("foo"); - { - mappings.field("type", "keyword"); - mappings.field("time_series_dimension", true); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - IndexResponse indexResponse = index( - index, - XContentFactory.jsonBuilder() - .startObject() - .field("foo", "bar") - .field("@timestamp", randomLongBetween(startTime, endTime)) - .endObject() - ); - assertEquals(indexResponse.getResult(), Result.CREATED); - } - - public void testBadStartTime() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", randomBoolean() ? "date" : "date_nanos"); - } - mappings.endObject(); - mappings.startObject("foo"); - { - mappings.field("type", "keyword"); - mappings.field("time_series_dimension", true); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> index( - index, - XContentFactory.jsonBuilder() - .startObject() - .field("foo", "bar") - .field("@timestamp", Math.max(startTime - randomLongBetween(1, 3), 0)) - .endObject() - ) - ); - assertThat(e.getRootCause().getMessage(), containsString("must be larger than")); - } - - public void testBadEndTime() throws IOException { - long endTime = System.currentTimeMillis(); - long startTime = endTime - TimeUnit.DAYS.toMillis(1); - - Settings s = Settings.builder() - .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), startTime) - .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), endTime) - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", randomBoolean() ? 
"date" : "date_nanos"); - } - mappings.endObject(); - mappings.startObject("foo"); - { - mappings.field("type", "keyword"); - mappings.field("time_series_dimension", true); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> index( - index, - XContentFactory.jsonBuilder() - .startObject() - .field("foo", "bar") - .field("@timestamp", endTime + randomLongBetween(0, 3)) - .endObject() - ) - ); - assertThat(e.getRootCause().getMessage(), containsString("must be smaller than")); - } - - public void testEnabledTimeStampMapper() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - XContentBuilder mappings = XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject(DataStreamTimestampFieldMapper.NAME); - if (randomBoolean()) { - mappings.field("enabled", true); - } else { - mappings.field("enabled", "true"); - } - mappings.endObject().endObject().endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); - XContentBuilder expect = XContentFactory.jsonBuilder(); - expect.startObject(); - { - expect.startObject("_doc"); - { - expect.startObject(DataStreamTimestampFieldMapper.NAME); - { - expect.field("enabled", true); - } - expect.endObject(); - expect.startObject("properties"); - { - expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - expect.field("type", "date"); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); - } - - @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/80918") - public void testAddTimeStampMeta() throws IOException { - Settings s = Settings.builder() - .put(IndexSettings.MODE.getKey(), "time_series") - .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "foo") - .build(); - - XContentBuilder mappings = XContentFactory.jsonBuilder(); - mappings.startObject(); - { - mappings.startObject("_doc"); - { - mappings.startObject(DataStreamTimestampFieldMapper.NAME); - { - mappings.field("enabled", true); - } - mappings.endObject(); - mappings.startObject("properties"); - { - mappings.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - mappings.field("type", "date"); - mappings.startObject("meta"); - { - mappings.field("field_meta", "time_series"); - } - mappings.endObject(); - } - mappings.endObject(); - mappings.startObject("foo"); - { - mappings.field("type", "keyword"); - mappings.field("time_series_dimension", true); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - } - mappings.endObject(); - - String index = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - prepareCreate(index).setSettings(s).setMapping(mappings).get(); - ensureGreen(index); - - IndexResponse indexResponse = index( - index, - XContentFactory.jsonBuilder() - .startObject() - .field("foo", "bar") - 
.field("@timestamp", System.currentTimeMillis()) - .field("new_field", "value") - .endObject() - ); - assertEquals(indexResponse.getResult(), Result.CREATED); - - XContentBuilder expect = XContentFactory.jsonBuilder(); - expect.startObject(); - { - expect.startObject("_doc"); - { - expect.startObject(DataStreamTimestampFieldMapper.NAME); - { - expect.field("enabled", true); - } - expect.endObject(); - expect.startObject("properties"); - { - expect.startObject(DataStreamTimestampFieldMapper.DEFAULT_PATH); - { - expect.field("type", "date"); - expect.startObject("meta"); - { - expect.field("field_meta", "time_series"); - } - expect.endObject(); - } - expect.endObject(); - expect.startObject("foo"); - { - expect.field("type", "keyword"); - expect.field("time_series_dimension", true); - } - expect.endObject(); - expect.startObject("new_field"); - { - expect.field("type", "text"); - expect.startObject("fields"); - { - expect.startObject("keyword"); - { - expect.field("type", "keyword"); - expect.field("ignore_above", 256); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - } - expect.endObject(); - GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).get(); - assertThat(getMappingsResponse.getMappings().get(index).source().string(), equalTo(Strings.toString(expect))); - } - -} From 244f4d3e88c48099eeb631e545b265fc48e3a292 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 23 Nov 2021 19:14:23 +0000 Subject: [PATCH 26/88] Explicit write methods for always-missing values (#80958) Today we use `writeBoolean(false);` to write to a `StreamOutput` an optional value that is always missing. It's something of an implementation detail that a missing value is indicated by a `false` (i.e. a zero byte) so this commit wraps these calls in methods that better indicate the intent. Relates #80944 Relates #80692 --- .../common/io/stream/StreamOutput.java | 13 +++++++++++++ .../transport/ActionTransportException.java | 4 ++-- .../transport/ConnectTransportException.java | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 66d8c748ffa61..992d88c04159c 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -1203,4 +1203,17 @@ public void writeOptionalTimeValue(@Nullable TimeValue timeValue) throws IOExcep } } + /** + * Similar to {@link #writeOptionalWriteable} but for use when the value is always missing. + */ + public void writeMissingWriteable(Class ignored) throws IOException { + writeBoolean(false); + } + + /** + * Similar to {@link #writeOptionalString} but for use when the value is always missing. 
+ */ + public void writeMissingString() throws IOException { + writeBoolean(false); + } } diff --git a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java index eb6296137b1a4..8f6f0fd477b1a 100644 --- a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java @@ -46,8 +46,8 @@ public ActionTransportException(String name, InetSocketAddress address, String a public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (out.getVersion().before(Version.V_8_1_0)) { - out.writeBoolean(false); // optional transport address - out.writeBoolean(false); // optional action + out.writeMissingWriteable(TransportAddress.class); + out.writeMissingString(); // action } } diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index aa480d0c4ad0e..eddd6d6f108ba 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -44,7 +44,7 @@ public ConnectTransportException(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (out.getVersion().before(Version.V_8_1_0)) { - out.writeBoolean(false); // optional & unused node field + out.writeMissingWriteable(DiscoveryNode.class); } } } From cbcd9010962e3b5f230219d862041382f9151fa3 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 23 Nov 2021 14:51:28 -0500 Subject: [PATCH 27/88] [DOCS] Relocate `index.mapping.dimension_fields.limit` setting docs (#80964) Moves `index.mapping.dimension_fields.limit` so that its co-located with other mapping limit settings. --- docs/reference/index-modules.asciidoc | 16 ---------------- .../mapping/mapping-settings-limit.asciidoc | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 771fe2f8158d6..50be1e0e34314 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -329,22 +329,6 @@ indicates no ingest pipeline will run. NOTE: You can't use a final pipeline to change the `_index` field. If the pipeline attempts to change the `_index` field, the indexing request will fail. -[[index-mapping-dimension-fields-limit]] -`index.mapping.dimension_fields.limit`:: -+ --- -experimental:[] - -.For internal use by Elastic only. -[%collapsible] -==== -Maximum number of time series dimensions for the index. Defaults to `16`. - -You can mark a field as a dimension using the `time_series_dimension` mapping -parameter. -==== --- - [[index-hidden]] `index.hidden`:: Indicates whether the index should be hidden by default. Hidden indices are not diff --git a/docs/reference/mapping/mapping-settings-limit.asciidoc b/docs/reference/mapping/mapping-settings-limit.asciidoc index bdde5ca744936..706efc29ed5c8 100644 --- a/docs/reference/mapping/mapping-settings-limit.asciidoc +++ b/docs/reference/mapping/mapping-settings-limit.asciidoc @@ -47,3 +47,19 @@ If your field mappings contain a large, arbitrary set of keys, consider using th It usually shouldn't be necessary to set this setting. 
The default is okay unless a user starts to add a huge number of fields with really long names. Default is `Long.MAX_VALUE` (no limit). + +[[index-mapping-dimension-fields-limit]] +`index.mapping.dimension_fields.limit`:: ++ +-- +experimental:[] (<>, integer) + +.For internal use by Elastic only. +[%collapsible] +==== +Maximum number of time series dimensions for the index. Defaults to `16`. + +You can mark a field as a dimension using the `time_series_dimension` mapping +parameter. +==== +-- From 27e1e5b92fd1e975ef60be6b0df8760dfc2b8e26 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 23 Nov 2021 15:52:57 -0500 Subject: [PATCH 28/88] TSDB: fix error without feature flag (#80945) When you haven't enable the tsdb feature flag we would refuse to start. That's bad because we will likely release with the feature flag disabled. This should get us starting again. It fixes: * We tried to register a settings update consumer for the `end_time` for the tsdb index even when the `end_time` setting wasn't registered. * Pass the tsdb feature flag to internal cluster tests. --- server/build.gradle | 3 +++ .../src/main/java/org/elasticsearch/index/IndexSettings.java | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/server/build.gradle b/server/build.gradle index 0f9798c53f22d..9a87155eb86c1 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -132,6 +132,9 @@ if (BuildParams.isSnapshotBuild() == false) { tasks.named("test").configure { systemProperty 'es.index_mode_feature_flag_registered', 'true' } + tasks.named("internalClusterTest").configure { + systemProperty 'es.index_mode_feature_flag_registered', 'true' + } } tasks.named("thirdPartyAudit").configure { diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 8b56a9e3d292b..6b3f21b7abb2e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -813,7 +813,9 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DEPTH_LIMIT_SETTING, this::setMappingDepthLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, this::setMappingFieldNameLengthLimit); scopedSettings.addSettingsUpdateConsumer(INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING, this::setMappingDimensionFieldsLimit); - scopedSettings.addSettingsUpdateConsumer(TIME_SERIES_END_TIME, this::updateTimeSeriesEndTime); + if (IndexSettings.isTimeSeriesModeEnabled()) { + scopedSettings.addSettingsUpdateConsumer(TIME_SERIES_END_TIME, this::updateTimeSeriesEndTime); + } } private void setSearchIdleAfter(TimeValue searchIdleAfter) { From 72ded7018885a20f00cb701d126117dcff593353 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Tue, 23 Nov 2021 12:53:20 -0800 Subject: [PATCH 29/88] Increase docker compose timeouts for CI builds --- .ci/jobs.t/defaults.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/jobs.t/defaults.yml b/.ci/jobs.t/defaults.yml index fa59584cefeed..2a6512dc020b4 100644 --- a/.ci/jobs.t/defaults.yml +++ b/.ci/jobs.t/defaults.yml @@ -53,6 +53,7 @@ url: https://github.com/elastic/elasticsearch/ - inject: properties-content: | + COMPOSE_HTTP_TIMEOUT=120 JOB_BRANCH=%BRANCH% HOME=$JENKINS_HOME GRADLEW=./gradlew --parallel --scan --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ From 
dd2424b79c066396a6b1e38b6d1fc705355b19f2 Mon Sep 17 00:00:00 2001 From: Colin Ng Date: Tue, 23 Nov 2021 13:28:53 -0800 Subject: [PATCH 30/88] Fix typo (#80925) --- docs/reference/mapping/runtime.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/runtime.asciidoc b/docs/reference/mapping/runtime.asciidoc index 0874ac9b74511..aff97aa810a1d 100644 --- a/docs/reference/mapping/runtime.asciidoc +++ b/docs/reference/mapping/runtime.asciidoc @@ -1477,7 +1477,7 @@ timestamp falls within the defined range. ==== Define a runtime field with a dissect pattern If you don't need the power of regular expressions, you can use <> instead of grok patterns. Dissect -patterns match on fixed delimiters but are typically faster that grok. +patterns match on fixed delimiters but are typically faster than grok. You can use dissect to achieve the same results as parsing the Apache logs with a <>. Instead of matching on a log From e94ec18d78e072f651b517f3b79297f898aa8cb2 Mon Sep 17 00:00:00 2001 From: weizijun Date: Wed, 24 Nov 2021 16:21:03 +0800 Subject: [PATCH 31/88] fixup (#80901) a tiny catch for a rollover test --- .../action/admin/indices/rollover/ConditionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java index dd8464803aa90..edd7403016d24 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java @@ -40,7 +40,7 @@ public void testMaxDocs() { assertThat(evaluate.matched, equalTo(true)); long maxDocsNotMatch = randomIntBetween(0, 99); - evaluate = maxDocsCondition.evaluate(new Condition.Stats(0, maxDocsNotMatch, randomByteSize(), randomByteSize())); + evaluate = maxDocsCondition.evaluate(new Condition.Stats(maxDocsNotMatch, 0, randomByteSize(), randomByteSize())); assertThat(evaluate.condition, equalTo(maxDocsCondition)); assertThat(evaluate.matched, equalTo(false)); } From e6e812c75f3d6e30525dc0fb26cc1f87efcc3826 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Wed, 24 Nov 2021 09:51:13 +0000 Subject: [PATCH 32/88] Allow to set the security manager (#80957) Post JDK 17 the security manager is disabled by default - setSecurityManager throws UOE - see JEP 411. This change adds a command line option to explicitly allow to set the security manager, which enables early testing with releases greater than JDK 17. 
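As a rough illustration of the behaviour behind this flag (not part of the change itself): on JDKs where the security manager is disallowed by default, installing one at runtime throws, and `-Djava.security.manager=allow` is what re-enables it. The probe class below is hypothetical, not code from this patch.
```
// Hypothetical probe, not from this patch. Run it with and without
// -Djava.security.manager=allow on a post-17 JDK to see both outcomes.
public class SecurityManagerProbe {
    public static void main(String[] args) {
        try {
            System.setSecurityManager(new SecurityManager());
            System.out.println("security manager installed at runtime");
        } catch (UnsupportedOperationException e) {
            // Thrown when the JVM was not started with -Djava.security.manager=allow
            System.out.println("dynamic installation refused: " + e.getMessage());
        }
    }
}
```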
--- .../gradle/internal/ElasticsearchTestBasePlugin.java | 1 + .../org/elasticsearch/tools/launchers/SystemJvmOptions.java | 2 ++ 2 files changed, 3 insertions(+) diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 840a35b4dea6c..ebd9841f2108b 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -97,6 +97,7 @@ public void execute(Task t) { "-Xmx" + System.getProperty("tests.heap.size", "512m"), "-Xms" + System.getProperty("tests.heap.size", "512m"), "--illegal-access=deny", + "-Djava.security.manager=allow", // TODO: only open these for mockito when it is modularized "--add-opens=java.base/java.security.cert=ALL-UNNAMED", "--add-opens=java.base/java.nio.channels=ALL-UNNAMED", diff --git a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java index e6d50f9bda03a..d5ebf57d6968b 100644 --- a/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/elasticsearch/tools/launchers/SystemJvmOptions.java @@ -27,6 +27,8 @@ static List systemJvmOptions() { * networkaddress.cache.negative ttl; set to -1 to cache forever. */ "-Des.networkaddress.cache.negative.ttl=10", + // Allow to set the security manager. + "-Djava.security.manager=allow", // pre-touch JVM emory pages during initialization "-XX:+AlwaysPreTouch", // explicitly set the stack size From 89946c51b3b65146e4a3671379994eaf8a4ce065 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 24 Nov 2021 09:54:15 +0000 Subject: [PATCH 33/88] [Transform] Remove legacy transform templates (#80948) Legacy transform templates will exist in clusters where transforms were first used prior to 7.16.0. (From 7.16.0 transforms uses composable templates.) Unlike ML, there's no danger these legacy templates date back to 6.x and contain types, so they are not at risk of being broken in 8.x. But it's still good to remove them from the cluster to keep it as clean as possible. 
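For anyone verifying the cleanup by hand on an upgraded cluster, the check boils down to the sketch below, which mirrors the assertion in the full-cluster-restart test added by this patch. It assumes the usual `ESRestTestCase` helpers (`client()`, `entityAsMap`) that the test itself uses; an empty response, or a 404, means the legacy templates are gone.
```
// Sketch of the manual check: no legacy transform templates should remain.
Request request = new Request("GET", "/_template/.transform-*,.data-frame-*");
try {
    Map<String, Object> templates = entityAsMap(client().performRequest(request));
    assertThat(templates.keySet(), empty());
} catch (ResponseException e) {
    // A 404 is also fine: the templates were never created on this cluster.
    assertThat(e.getResponse().getStatusLine().getStatusCode(), is(404));
}
```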
--- .../xpack/transform/Transform.java | 20 +++++ .../xpack/restart/FullClusterRestartIT.java | 78 +++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 1cac435918727..22deda76cff4a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -21,6 +21,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.OriginSettingClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -109,7 +110,9 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import java.util.function.UnaryOperator; import static org.elasticsearch.xpack.core.ClientHelper.TRANSFORM_ORIGIN; import static org.elasticsearch.xpack.core.transform.TransformMessages.FAILED_TO_UNSET_RESET_MODE; @@ -287,6 +290,23 @@ public List getNamedXContent() { return new TransformNamedXContentProvider().getNamedXContentParsers(); } + @Override + public UnaryOperator> getIndexTemplateMetadataUpgrader() { + return templates -> { + // These are all legacy templates that were created in old versions. None are needed now. + // The "internal" indices became system indices and the "notifications" indices now use composable templates. 
+ templates.remove(".data-frame-internal-1"); + templates.remove(".data-frame-internal-2"); + templates.remove(".transform-internal-003"); + templates.remove(".transform-internal-004"); + templates.remove(".transform-internal-005"); + templates.remove(".data-frame-notifications-1"); + templates.remove(".transform-notifications-000001"); + templates.remove(".transform-notifications-000002"); + return templates; + }; + } + @Override public Collection getSystemIndexDescriptors(Settings settings) { try { diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index d64da74fe9080..0336538124e6b 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -51,6 +51,7 @@ import static org.elasticsearch.upgrades.FullClusterRestartIT.assertNumHits; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -390,6 +391,83 @@ public void testRollupAfterRestart() throws Exception { } } + public void testTransformLegacyTemplateCleanup() throws Exception { + assumeTrue("Before 7.2 transforms didn't exist", getOldClusterVersion().onOrAfter(Version.V_7_2_0)); + if (isRunningAgainstOldCluster()) { + + // create the source index + final Request createIndexRequest = new Request("PUT", "customers"); + createIndexRequest.setJsonEntity( + "{" + + "\"mappings\": {" + + " \"properties\": {" + + " \"customer_id\": { \"type\": \"keyword\" }," + + " \"price\": { \"type\": \"double\" }" + + " }" + + "}" + + "}" + ); + + Map createIndexResponse = entityAsMap(client().performRequest(createIndexRequest)); + assertThat(createIndexResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + // create a transform + String endpoint = getOldClusterVersion().onOrAfter(Version.V_7_5_0) + ? 
"_transform/transform-full-cluster-restart-test" + : "_data_frame/transforms/transform-full-cluster-restart-test"; + final Request createTransformRequest = new Request("PUT", endpoint); + + createTransformRequest.setJsonEntity( + "{" + + "\"source\":{" + + " \"index\":\"customers\"" + + "}," + + "\"description\":\"testing\"," + + "\"dest\":{" + + " \"index\":\"max_price\"" + + "}," + + "\"pivot\": {" + + " \"group_by\":{" + + " \"customer_id\":{" + + " \"terms\":{" + + " \"field\":\"customer_id\"" + + " }" + + " }" + + " }," + + " \"aggregations\":{" + + " \"max_price\":{" + + " \"max\":{" + + " \"field\":\"price\"" + + " }" + + " }" + + " }" + + "}" + + "}" + ); + + Map createTransformResponse = entityAsMap(client().performRequest(createTransformRequest)); + assertThat(createTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + } else { + // legacy index templates created in previous releases should not be present anymore + assertBusy(() -> { + Request request = new Request("GET", "/_template/.transform-*,.data-frame-*"); + try { + Response response = client().performRequest(request); + Map responseLevel = entityAsMap(response); + assertNotNull(responseLevel); + assertThat(responseLevel.keySet(), empty()); + } catch (ResponseException e) { + // not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }); + } + } + public void testSlmPolicyAndStats() throws IOException { SnapshotLifecyclePolicy slmPolicy = new SnapshotLifecyclePolicy( "test-policy", From 0f108251341303dbf4afabaadc8f1d673a368b0c Mon Sep 17 00:00:00 2001 From: Ievgen Degtiarenko Date: Wed, 24 Nov 2021 12:19:04 +0100 Subject: [PATCH 34/88] clear auto-follow errors on deleting pattern (#80544) --- .../ccr/action/AutoFollowCoordinator.java | 43 +- ...ransportDeleteAutoFollowPatternAction.java | 37 +- .../action/AutoFollowCoordinatorTests.java | 642 +++++------------- 3 files changed, 220 insertions(+), 502 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index f514694e83396..38f7603255d6a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -84,12 +84,28 @@ public class AutoFollowCoordinator extends AbstractLifecycleComponent implements private volatile TimeValue waitForMetadataTimeOut; private volatile Map autoFollowers = Collections.emptyMap(); + private volatile Set patterns = Set.of(); // The following fields are read and updated under a lock: private long numberOfSuccessfulIndicesAutoFollowed = 0; private long numberOfFailedIndicesAutoFollowed = 0; private long numberOfFailedRemoteClusterStateRequests = 0; - private final LinkedHashMap> recentAutoFollowErrors; + private final LinkedHashMap> recentAutoFollowErrors; + + private static final class AutoFollowErrorKey { + private final String pattern; + private final String index; + + private AutoFollowErrorKey(String pattern, String index) { + this.pattern = Objects.requireNonNull(pattern); + this.index = index; + } + + @Override + public String toString() { + return index != null ? 
pattern + ':' + index : pattern; + } + } public AutoFollowCoordinator( final Settings settings, @@ -109,7 +125,7 @@ public AutoFollowCoordinator( this.executor = Objects.requireNonNull(executor); this.recentAutoFollowErrors = new LinkedHashMap<>() { @Override - protected boolean removeEldestEntry(final Map.Entry> eldest) { + protected boolean removeEldestEntry(final Map.Entry> eldest) { return size() > MAX_AUTO_FOLLOW_ERRORS; } }; @@ -162,21 +178,31 @@ public synchronized AutoFollowStats getStats() { } } + var recentAutoFollowErrorsCopy = new TreeMap>(); + for (var entry : recentAutoFollowErrors.entrySet()) { + recentAutoFollowErrorsCopy.put(entry.getKey().toString(), entry.getValue()); + } + return new AutoFollowStats( numberOfFailedIndicesAutoFollowed, numberOfFailedRemoteClusterStateRequests, numberOfSuccessfulIndicesAutoFollowed, - new TreeMap<>(recentAutoFollowErrors), + recentAutoFollowErrorsCopy, timesSinceLastAutoFollowPerRemoteCluster ); } synchronized void updateStats(List results) { + // purge stats for removed patterns + var currentPatterns = this.patterns; + recentAutoFollowErrors.keySet().removeIf(key -> currentPatterns.contains(key.pattern) == false); + // add new stats long newStatsReceivedTimeStamp = absoluteMillisTimeProvider.getAsLong(); for (AutoFollowResult result : results) { + var onlyPatternKey = new AutoFollowErrorKey(result.autoFollowPatternName, null); if (result.clusterStateFetchException != null) { recentAutoFollowErrors.put( - result.autoFollowPatternName, + onlyPatternKey, Tuple.tuple(newStatsReceivedTimeStamp, new ElasticsearchException(result.clusterStateFetchException)) ); numberOfFailedRemoteClusterStateRequests++; @@ -188,9 +214,9 @@ synchronized void updateStats(List results) { result.clusterStateFetchException ); } else { - recentAutoFollowErrors.remove(result.autoFollowPatternName); + recentAutoFollowErrors.remove(onlyPatternKey); for (Map.Entry entry : result.autoFollowExecutionResults.entrySet()) { - final String patternAndIndexKey = result.autoFollowPatternName + ":" + entry.getKey().getName(); + var patternAndIndexKey = new AutoFollowErrorKey(result.autoFollowPatternName, entry.getKey().getName()); if (entry.getValue() != null) { numberOfFailedIndicesAutoFollowed++; recentAutoFollowErrors.put( @@ -199,7 +225,7 @@ synchronized void updateStats(List results) { ); LOGGER.warn( new ParameterizedMessage( - "failure occurred while auto following index [{}] for auto follow " + "pattern [{}]", + "failure occurred while auto following index [{}] for auto follow pattern [{}]", entry.getKey(), result.autoFollowPatternName ), @@ -211,7 +237,6 @@ synchronized void updateStats(List results) { } } } - } } @@ -227,6 +252,8 @@ void updateAutoFollowers(ClusterState followerClusterState) { return; } + this.patterns = Set.copyOf(autoFollowMetadata.getPatterns().keySet()); + final CopyOnWriteHashMap autoFollowersCopy = CopyOnWriteHashMap.copyOf(this.autoFollowers); Set newRemoteClusters = autoFollowMetadata.getPatterns() .values() diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index a791028b22ce7..980afd99977c9 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -19,17 +19,13 @@ import 
org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; -import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata.AutoFollowPattern; import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - public class TransportDeleteAutoFollowPatternAction extends AcknowledgedTransportMasterNodeAction { @Inject @@ -72,28 +68,23 @@ public ClusterState execute(ClusterState currentState) { static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) { AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); - if (currentAutoFollowMetadata == null) { - throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); - } - Map patterns = currentAutoFollowMetadata.getPatterns(); - AutoFollowPattern autoFollowPatternToRemove = patterns.get(request.getName()); - if (autoFollowPatternToRemove == null) { + if (currentAutoFollowMetadata == null || currentAutoFollowMetadata.getPatterns().get(request.getName()) == null) { throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); } - final Map patternsCopy = new HashMap<>(patterns); - final Map> followedLeaderIndexUUIDSCopy = new HashMap<>( - currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs() - ); - final Map> headers = new HashMap<>(currentAutoFollowMetadata.getHeaders()); - patternsCopy.remove(request.getName()); - followedLeaderIndexUUIDSCopy.remove(request.getName()); - headers.remove(request.getName()); + AutoFollowMetadata newAutoFollowMetadata = removePattern(currentAutoFollowMetadata, request.getName()); - AutoFollowMetadata newAutoFollowMetadata = new AutoFollowMetadata(patternsCopy, followedLeaderIndexUUIDSCopy, headers); - ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata).build()); - return newState.build(); + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)) + .build(); + } + + private static AutoFollowMetadata removePattern(AutoFollowMetadata metadata, String name) { + return new AutoFollowMetadata( + Maps.copyMapWithRemovedEntry(metadata.getPatterns(), name), + Maps.copyMapWithRemovedEntry(metadata.getFollowedLeaderIndexUUIDs(), name), + Maps.copyMapWithRemovedEntry(metadata.getHeaders(), name) + ); } @Override diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index baf6b5906af74..671c920270a9f 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -96,24 +96,7 @@ public void testAutoFollower() { ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern 
autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -183,24 +166,7 @@ public void testAutoFollower_dataStream() { ClusterState remoteState = createRemoteClusterStateWithDataStream("logs-foobar"); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -268,24 +234,7 @@ public void testAutoFollowerClusterStateApiFailure() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -335,24 +284,7 @@ public void testAutoFollowerUpdateClusterStateFailure() { when(client.getRemoteClusterClient(anyString())).thenReturn(client); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -715,24 +647,7 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { when(client.getRemoteClusterClient(anyString())).thenReturn(client); ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -790,24 +705,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa } public void testGetLeaderIndicesToFollow() { - final AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + final AutoFollowPattern autoFollowPattern = 
createAutoFollowPattern("remote", "metrics-*"); RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); Metadata.Builder imdBuilder = Metadata.builder(); @@ -882,24 +780,7 @@ public void testGetLeaderIndicesToFollow() { } public void testGetLeaderIndicesToFollow_shardsNotStarted() { - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "*"); // 1 shard started and another not started: ClusterState remoteState = createRemoteClusterState("index1", true); @@ -934,24 +815,7 @@ public void testGetLeaderIndicesToFollow_shardsNotStarted() { } public void testGetLeaderIndicesToFollowWithClosedIndices() { - final AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + final AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "*"); // index is opened ClusterState remoteState = ClusterStateCreationUtils.stateWithActivePrimary("test-index", true, randomIntBetween(1, 3), 0); @@ -1090,24 +954,8 @@ public void testCleanFollowedLeaderIndicesNoEntry() { } public void testGetFollowerIndexName() { - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "metrics-*"); + assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0")); autoFollowPattern = new AutoFollowPattern( @@ -1152,15 +1000,7 @@ public void testGetFollowerIndexName() { } public void testStats() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); autoFollowCoordinator.updateStats(Collections.singletonList(new AutoFollowCoordinator.AutoFollowResult("_alias1"))); AutoFollowStats autoFollowStats = autoFollowCoordinator.getStats(); @@ -1244,69 +1084,9 @@ public void testUpdateAutoFollowers() { ); // Add 3 patterns: Map patterns = new HashMap<>(); - patterns.put( - "pattern1", - new AutoFollowPattern( - "remote1", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern2", - new AutoFollowPattern( - "remote2", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern3", - new AutoFollowPattern( - "remote2", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern1", createAutoFollowPattern("remote1", "logs-*")); + 
patterns.put("pattern2", createAutoFollowPattern("remote2", "logs-*")); + patterns.put("pattern3", createAutoFollowPattern("remote2", "metrics-*")); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1335,27 +1115,7 @@ public void testUpdateAutoFollowers() { assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().get("remote2"), notNullValue()); assertThat(removedAutoFollower1.removed, is(true)); // Add pattern 4: - patterns.put( - "pattern4", - new AutoFollowPattern( - "remote1", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern4", createAutoFollowPattern("remote1", "metrics-*")); clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1388,15 +1148,7 @@ public void testUpdateAutoFollowers() { } public void testUpdateAutoFollowersNoPatterns() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() @@ -1411,15 +1163,7 @@ public void testUpdateAutoFollowersNoPatterns() { } public void testUpdateAutoFollowersNoAutoFollowMetadata() { - AutoFollowCoordinator autoFollowCoordinator = new AutoFollowCoordinator( - Settings.EMPTY, - null, - mockClusterService(), - new CcrLicenseChecker(() -> true, () -> false), - () -> 1L, - () -> 1L, - Runnable::run - ); + AutoFollowCoordinator autoFollowCoordinator = createAutoFollowCoordinator(); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")).build(); autoFollowCoordinator.updateAutoFollowers(clusterState); assertThat(autoFollowCoordinator.getStats().getAutoFollowedClusters().size(), equalTo(0)); @@ -1442,69 +1186,9 @@ public void testUpdateAutoFollowersNoActivePatterns() { // Add 3 patterns: Map patterns = new HashMap<>(); - patterns.put( - "pattern1", - new AutoFollowPattern( - "remote1", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern2", - new AutoFollowPattern( - "remote2", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); - patterns.put( - "pattern3", - new AutoFollowPattern( - "remote2", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern1", createAutoFollowPattern("remote1", "logs-*")); + patterns.put("pattern2", createAutoFollowPattern("remote2", "logs-*")); + patterns.put("pattern3", createAutoFollowPattern("remote2", "metrics-*")); autoFollowCoordinator.updateAutoFollowers( ClusterState.builder(new ClusterName("remote")) @@ -1587,27 +1271,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { assertThat(removedAutoFollower2.removed, is(false)); // Add active pattern 4 and make pattern 2 inactive - patterns.put( - "pattern4", - new 
AutoFollowPattern( - "remote1", - Collections.singletonList("metrics-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ); + patterns.put("pattern4", createAutoFollowPattern("remote1", "metrics-*")); patterns.computeIfPresent( "pattern2", (name, pattern) -> new AutoFollowPattern( @@ -1670,24 +1334,7 @@ public void testWaitForMetadataVersion() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1751,24 +1398,7 @@ public void testWaitForTimeOut() { Client client = mock(Client.class); when(client.getRemoteClusterClient(anyString())).thenReturn(client); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1822,24 +1452,7 @@ public void testAutoFollowerSoftDeletesDisabled() { ClusterState remoteState = createRemoteClusterState("logs-20190101", false); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -1905,24 +1518,7 @@ public void testAutoFollowerFollowerIndexAlreadyExists() { ClusterState remoteState = createRemoteClusterState("logs-20190101", true); - AutoFollowPattern autoFollowPattern = new AutoFollowPattern( - "remote", - Collections.singletonList("logs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ); + AutoFollowPattern autoFollowPattern = createAutoFollowPattern("remote", "logs-*"); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -2094,27 +1690,7 @@ public void testClosedIndicesAreNotAutoFollowed() { .putCustom( AutoFollowMetadata.TYPE, new AutoFollowMetadata( - Map.of( - pattern, - new AutoFollowPattern( - "remote", - List.of("docs-*"), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ), + Map.of(pattern, createAutoFollowPattern("remote", "docs-*")), Map.of(pattern, List.of()), Map.of(pattern, Map.of()) ) @@ -2350,6 +1926,150 @@ public void testFollowerIndexIsCreatedInExecuteAutoFollow() { assertThat(autoFollowResults.v2().contains(indexName), equalTo(true)); } + public void 
testRemovesClusterLevelErrorsOnRemovingAutoFollowPattern() { + // given auto-follow pattern added + var pattern1 = createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2, "pattern3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult("pattern1", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern2", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern3", new RuntimeException("ClusterStateFetchException")) + ) + ); + + // when auto-follow pattern `pattern3` is removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat(before.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1", "pattern2", "pattern3"))); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1", "pattern2"))); + } + + public void testRemovesIndexLevelErrorsOnRemovingAutoFollowPattern() { + // given auto-follow pattern added + var pattern1 = createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2, "pattern3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult( + "pattern1", + List.of(Tuple.tuple(new Index("logs-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ), + new AutoFollowCoordinator.AutoFollowResult( + "pattern2", + List.of(Tuple.tuple(new Index("logs-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ), + new AutoFollowCoordinator.AutoFollowResult( + "pattern3", + List.of(Tuple.tuple(new Index("metrics-1", UUIDs.base64UUID()), new RuntimeException("AutoFollowExecutionException"))) + ) + ) + ); + + // when auto-follow pattern `pattern3` is removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern1", pattern1, "pattern2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat( + before.getRecentAutoFollowErrors().keySet(), + equalTo(Set.of("pattern1:logs-1", "pattern2:logs-1", "pattern3:metrics-1")) + ); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern1:logs-1", "pattern2:logs-1"))); + } + + public void testRemovesErrorsIfPatternContainsColon() { + // given auto-follow pattern added + var pattern1 = 
createAutoFollowPattern("remote1", "logs-*"); + var pattern2 = createAutoFollowPattern("remote2", "logs-*"); + var pattern3 = createAutoFollowPattern("remote2", "metrics-*");// same remote + + var autoFollowCoordinator = createAutoFollowCoordinator(); + autoFollowCoordinator.updateAutoFollowers( + createClusterStateWith(Map.of("pattern:1", pattern1, "pattern:2", pattern2, "pattern:3", pattern3)) + ); + + // and stats are published + autoFollowCoordinator.updateStats( + List.of( + new AutoFollowCoordinator.AutoFollowResult("pattern:1", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern:2", new RuntimeException("ClusterStateFetchException")), + new AutoFollowCoordinator.AutoFollowResult("pattern:3", new RuntimeException("ClusterStateFetchException")) + ) + ); + + // when auto-follow pattern `pattern:3` is removed + var before = autoFollowCoordinator.getStats(); + autoFollowCoordinator.updateAutoFollowers(createClusterStateWith(Map.of("pattern:1", pattern1, "pattern:2", pattern2))); + autoFollowCoordinator.updateStats(List.of());// actually triggers the purge + var after = autoFollowCoordinator.getStats(); + + // then stats are removed as well (but only for the removed pattern) + assertThat(before.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern:1", "pattern:2", "pattern:3"))); + assertThat(after.getRecentAutoFollowErrors().keySet(), equalTo(Set.of("pattern:1", "pattern:2"))); + } + + private AutoFollowCoordinator createAutoFollowCoordinator() { + return new AutoFollowCoordinator( + Settings.EMPTY, + null, + mockClusterService(), + new CcrLicenseChecker(() -> true, () -> false), + () -> 1L, + () -> 1L, + Runnable::run + ); + } + + private ClusterState createClusterStateWith(Map patterns) { + return ClusterState.builder(new ClusterName("remote")) + .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Map.of(), Map.of()))) + .build(); + } + + private AutoFollowPattern createAutoFollowPattern(String remoteCluster, String pattern) { + return new AutoFollowPattern( + remoteCluster, + List.of(pattern), + List.of(), + null, + Settings.EMPTY, + true, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ); + } + private Tuple, Set> executeAutoFollow( String indexPattern, ClusterState finalRemoteState @@ -2364,27 +2084,7 @@ private Tuple, Set> execute .putCustom( AutoFollowMetadata.TYPE, new AutoFollowMetadata( - Map.of( - pattern, - new AutoFollowPattern( - "remote", - List.of(indexPattern), - Collections.emptyList(), - null, - Settings.EMPTY, - true, - null, - null, - null, - null, - null, - null, - null, - null, - null, - null - ) - ), + Map.of(pattern, createAutoFollowPattern("remote", indexPattern)), Map.of(pattern, List.of()), Map.of(pattern, Map.of()) ) From 594fe00396ed70a2cd035775badbdb5ef2aecc98 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Wed, 24 Nov 2021 13:57:32 +0000 Subject: [PATCH 35/88] Fix WatcherRestartIT (#80986) This removes the unnecessary watcher start call in the test to reduce the possibility of flakiness. The call is not needed as the Watcher plugin is active and it will install (and upgrade/remove) the templates regardless if watcher is started. 
---
 .../test/java/org/elasticsearch/upgrades/WatcherRestartIT.java | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java
index 54921de6b9320..d0bced071bd78 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java
@@ -30,9 +30,6 @@ public void testWatcherRestart() throws Exception {
     }
 
     public void testEnsureWatcherDeletesLegacyTemplates() throws Exception {
-        client().performRequest(new Request("POST", "/_watcher/_start"));
-        ensureWatcherStarted();
-
         if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) {
             // legacy index template created in previous releases should not be present anymore
             assertBusy(() -> {

From 99df2fbff456f0c8b3ad35f71899174bffacd107 Mon Sep 17 00:00:00 2001
From: Henning Andersen <33268011+henningandersen@users.noreply.github.com>
Date: Wed, 24 Nov 2021 16:09:20 +0100
Subject: [PATCH 36/88] Fix flood stage with system indices (#80674)

System indices do not allow setting changes. This made flood stage
handling no longer work for nodes that host a shard of a system index.

Now pass in an origin to ensure ES can mark indices located on a node
above flood stage as "read-only / allow-delete", regardless of whether
they are system indices or not.
---
 .../allocation/DiskThresholdMonitorIT.java    | 82 +++++++++++++++++++
 .../put/UpdateSettingsRequestBuilder.java     |  8 ++
 .../allocation/DiskThresholdMonitor.java      |  1 +
 3 files changed, 91 insertions(+)
 create mode 100644 server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java
new file mode 100644
index 0000000000000..917742010e9be
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.ClusterInfoService;
+import org.elasticsearch.cluster.DiskUsageIntegTestCase;
+import org.elasticsearch.cluster.InternalClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.tasks.TaskResultsService;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Locale;
+
+import static org.elasticsearch.index.store.Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase {
+
+    private static final long FLOODSTAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal, otherSettings))
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES * 2 + "b")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), FLOODSTAGE_BYTES + "b")
+            .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms")
+            .build();
+    }
+
+    public void testFloodStageExceeded() throws Exception {
+        internalCluster().startMasterOnlyNode();
+        final String dataNodeName = internalCluster().startDataOnlyNode();
+
+        final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster().getCurrentMasterNodeInstance(
+            ClusterInfoService.class
+        );
+
+        final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        createIndex(
+            indexName,
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms")
+                .build()
+        );
+        // ensure we have a system index on the data node too.
+        assertAcked(client().admin().indices().prepareCreate(TaskResultsService.TASK_INDEX));
+
+        getTestFileStore(dataNodeName).setTotalSpace(1L);
+        refreshClusterInfo();
+        assertBusy(() -> {
+            assertBlocked(
+                client().prepareIndex().setIndex(indexName).setId("1").setSource("f", "g"),
+                IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK
+            );
+            assertThat(
+                client().admin()
+                    .indices()
+                    .prepareGetSettings(indexName)
+                    .setNames(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE)
+                    .get()
+                    .getSetting(indexName, IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE),
+                equalTo("true")
+            );
+        });
+    }
+}

diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
index 1e64c26f91b49..0ec646dcddd8e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
@@ -83,4 +83,12 @@ public UpdateSettingsRequestBuilder setPreserveExisting(boolean preserveExisting
         request.setPreserveExisting(preserveExisting);
         return this;
     }
+
+    /**
+     * Sets the origin to use; only set this when the settings update is requested by ES internal processes.
+     */
+    public UpdateSettingsRequestBuilder origin(String origin) {
+        request.origin(origin);
+        return this;
+    }
 }

diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
index c5fb7d1307002..868d96c1c098c 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java
@@ -453,6 +453,7 @@ protected void updateIndicesReadOnly(Set<String> indicesToUpdate, ActionListener
             .indices()
             .prepareUpdateSettings(indicesToUpdate.toArray(Strings.EMPTY_ARRAY))
             .setSettings(readOnlySettings)
+            .origin("disk-threshold-monitor")
             .execute(wrappedListener.map(r -> null));
     }

From 9548bdbbbb85ccf9261ea0226a4d3161eb4ed346 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christoph=20B=C3=BCscher?=
Date: Wed, 24 Nov 2021 17:20:22 +0100
Subject: [PATCH 37/88] Fix flaky DynamicMappingIT testDynamicRuntimeObjectFields (#80999)

The final part of this test checks that we throw a MapperParsingException
when we try to index into a dynamic runtime object that has already been
mapped to a different type. Under very rare circumstances this can fail
when the mapping update that a previous document index operation has
triggered hasn't been completely applied on the shard the second document
is targeted at. In this case, indexing the second document can itself
trigger a mapping merge operation that can fail with a different exception
type (IAE) with a very similar message.

In order to simplify the test and make it more robust, we can use the same
document id for both index requests, making sure we target the same shard
group.
Closes #80722
---
 .../java/org/elasticsearch/index/mapper/DynamicMappingIT.java | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
index 38b0bed97457d..6bc6accbfa76f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java
@@ -447,10 +447,13 @@ public void testDynamicRuntimeObjectFields() {
         );
         // the parent object has been mapped dynamic:true, hence the field gets indexed
+        // we use a fixed doc id here to make sure this document and the one we send later with a conflicting type
+        // target the same shard where we are sure the mapping update has been applied
         assertEquals(
             RestStatus.CREATED,
             client().prepareIndex("test")
                 .setSource("obj.runtime.dynamic.number", 1)
+                .setId("id")
                 .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
                 .get()
                 .status()

From 537d5cf940ee5fad71863382a8ece541b49176a1 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Wed, 24 Nov 2021 19:48:39 +0200
Subject: [PATCH 38/88] Rename `BaseTasksRequest.parentTaskId` to
 `targetParentTaskId` (#80908)

`BaseTasksRequest` is a subclass of `TransportRequest`. Both of them
contain a member variable `TaskId parentTaskId`. `TransportRequest`
provides a setter for its member called `setParentTask` while
`BaseTasksRequest` provides `setParentTaskId`. This is confusing and can
easily lead to mistakes where one setter is called in a place where the
other was intended.

In particular, `TransportRequest.parentTaskId` holds the task id of the
request's parent task, while `BaseTasksRequest.parentTaskId` holds a
parent task id that is used to filter the tasks matched by that tasks
request.

This commit renames `BaseTasksRequest.parentTaskId` to
`targetParentTaskId` to disambiguate the two concepts and prevent
confusion and mistakes. It also renames `BaseTasksRequest.taskId` to
`targetTaskId` to match.
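For illustration only (an editorial sketch, not part of the patch; the node
names and task ids below are made up): after the rename, the filtering
setters introduced here and the tracing setter inherited from
`TransportRequest` read unambiguously side by side.

    // Filter: cancel only tasks whose parent is task 42 on node1
    // (setTargetParentTaskId narrows which tasks this request applies to).
    CancelTasksRequest request = new CancelTasksRequest();
    request.setTargetParentTaskId(new TaskId("node1", 42));
    // Tracing: record that this cancel request itself was issued by task 7
    // on node2 (setParentTask is the setter inherited from TransportRequest).
    request.setParentTask(new TaskId("node2", 7));
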
Co-authored-by: Elastic Machine --- .../client/TasksRequestConverters.java | 6 +- .../client/TasksRequestConvertersTests.java | 6 +- .../TasksClientDocumentationIT.java | 2 +- .../documentation/ReindexDocumentationIT.java | 4 +- .../reindex/RestRethrottleAction.java | 2 +- .../reindex/TransportRethrottleAction.java | 2 +- .../elasticsearch/reindex/CancelTests.java | 6 +- .../reindex/RethrottleTests.java | 4 +- .../elasticsearch/reindex/RoundTripTests.java | 4 +- .../TransportRethrottleActionTests.java | 2 +- .../node/tasks/CancellableTasksIT.java | 16 ++--- .../persistent/PersistentTasksExecutorIT.java | 29 ++++++-- .../search/SearchCancellationIT.java | 6 +- .../search/ccs/CrossClusterSearchIT.java | 2 +- .../node/tasks/cancel/CancelTasksRequest.java | 8 +-- .../cancel/TransportCancelTasksAction.java | 12 ++-- .../action/search/SearchTransportService.java | 2 +- .../support/tasks/BaseTasksRequest.java | 70 ++++++++++++++----- .../support/tasks/TasksRequestBuilder.java | 8 +-- .../support/tasks/TransportTasksAction.java | 12 ++-- .../persistent/PersistentTasksService.java | 2 +- .../action/RestCancellableNodeClient.java | 2 +- .../admin/cluster/RestCancelTasksAction.java | 4 +- .../admin/cluster/RestListTasksAction.java | 2 +- .../node/tasks/CancellableTasksTests.java | 14 ++-- .../node/tasks/TransportTasksActionTests.java | 4 +- .../tasks/cancel/CancelTasksRequestTests.java | 8 +-- .../RestCancellableNodeClientTests.java | 2 +- .../xpack/search/AsyncSearchTask.java | 2 +- .../AbstractEqlBlockingIntegTestCase.java | 2 +- .../ml/action/TransportDeleteJobAction.java | 2 +- .../ml/dataframe/steps/ReindexingStep.java | 2 +- .../AbstractSqlBlockingIntegTestCase.java | 2 +- 33 files changed, 151 insertions(+), 100 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java index a9c9df2c451e9..54525a8cd304d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -34,15 +34,15 @@ static Request cancelTasks(CancelTasksRequest req) { } static Request listTasks(ListTasksRequest listTaskRequest) { - if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { - throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); + if (listTaskRequest.getTargetTaskId() != null && listTaskRequest.getTargetTaskId().isSet()) { + throw new IllegalArgumentException("TargetTaskId cannot be used for list tasks request"); } Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); RequestConverters.Params params = new RequestConverters.Params(); params.withTimeout(listTaskRequest.getTimeout()) .withDetailed(listTaskRequest.getDetailed()) .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) - .withParentTaskId(listTaskRequest.getParentTaskId()) + .withParentTaskId(listTaskRequest.getTargetParentTaskId()) .withNodes(listTaskRequest.getNodes()) .withActions(listTaskRequest.getActions()) .putParam("group_by", "none"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java index 7b115b1f4c6f1..2b92e924d1062 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksRequestConvertersTests.java @@ -72,7 +72,7 @@ public void testListTasks() { if (randomBoolean()) { if (randomBoolean()) { TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); - request.setParentTaskId(taskId); + request.setTargetParentTaskId(taskId); expectedParams.put("parent_task_id", taskId.toString()); } else { request.setParentTask(TaskId.EMPTY_TASK_ID); @@ -102,12 +102,12 @@ public void testListTasks() { } { ListTasksRequest request = new ListTasksRequest(); - request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + request.setTargetTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, () -> TasksRequestConverters.listTasks(request) ); - assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + assertEquals("TargetTaskId cannot be used for list tasks request", exception.getMessage()); } } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index c7d434def0dae..2a541255409f5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -67,7 +67,7 @@ public void testListTasks() throws IOException { // tag::list-tasks-request-filter request.setActions("cluster:*"); // <1> request.setNodes("nodeId1", "nodeId2"); // <2> - request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3> + request.setTargetParentTaskId(new TaskId("parentTaskId", 42)); // <3> // end::list-tasks-request-filter // tag::list-tasks-request-detailed diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index b5a200afc78a3..ae7f95bc43ae7 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -217,13 +217,13 @@ public void testTasks() throws Exception { .setActions(UpdateByQueryAction.NAME).get().getTasks(); // Cancel a specific update-by-query request client.admin().cluster().prepareCancelTasks() - .setTaskId(taskId).get().getTasks(); + .setTargetTaskId(taskId).get().getTasks(); // end::update-by-query-cancel-task } { // tag::update-by-query-rethrottle new RethrottleRequestBuilder(client, RethrottleAction.INSTANCE) - .setTaskId(taskId) + .setTargetTaskId(taskId) .setRequestsPerSecond(2.0f) .get(); // end::update-by-query-rethrottle diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java index 8605a210aec55..c046e3c3aba39 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/RestRethrottleAction.java @@ -44,7 +44,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final 
NodeClient client) { RethrottleRequest internalRequest = new RethrottleRequest(); - internalRequest.setTaskId(new TaskId(request.param("taskId"))); + internalRequest.setTargetTaskId(new TaskId(request.param("taskId"))); Float requestsPerSecond = AbstractBaseReindexRestHandler.parseRequestsPerSecond(request); if (requestsPerSecond == null) { throw new IllegalArgumentException("requests_per_second is a required parameter"); diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java index d7ad28381cf63..2fd2c5f93d3ee 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/TransportRethrottleAction.java @@ -93,7 +93,7 @@ private static void rethrottleParentTask( if (runningSubtasks > 0) { RethrottleRequest subRequest = new RethrottleRequest(); subRequest.setRequestsPerSecond(newRequestsPerSecond / runningSubtasks); - subRequest.setParentTaskId(new TaskId(localNodeId, task.getId())); + subRequest.setTargetParentTaskId(new TaskId(localNodeId, task.getId())); logger.debug("rethrottling children of task [{}] to [{}] requests per second", task.getId(), subRequest.getRequestsPerSecond()); client.execute(RethrottleAction.INSTANCE, subRequest, ActionListener.wrap(r -> { r.rethrowFailures("Rethrottle"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java index be62bc0205b39..75a72ec34571a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/CancelTests.java @@ -139,7 +139,7 @@ private void testCancel( // Cancel the request while the action is blocked by the indexing operation listeners. // This will prevent further requests from being sent. 
- ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(mainTask.getTaskId()).get(); + ListTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTargetTaskId(mainTask.getTaskId()).get(); cancelTasksResponse.rethrowFailures("Cancel"); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); @@ -155,7 +155,7 @@ private void testCancel( ListTasksResponse sliceList = client().admin() .cluster() .prepareListTasks() - .setParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.getTaskId()) .setDetailed(true) .get(); sliceList.rethrowFailures("Fetch slice tasks"); @@ -193,7 +193,7 @@ private void testCancel( String tasks = client().admin() .cluster() .prepareListTasks() - .setParentTaskId(mainTask.getTaskId()) + .setTargetParentTaskId(mainTask.getTaskId()) .setDetailed(true) .get() .toString(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java index b3be206e5c5c0..99ceb5087bff0 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RethrottleTests.java @@ -108,7 +108,7 @@ private void testCase(AbstractBulkByScrollRequestBuilder request, String a .getTask() .getStatus(); long finishedSubTasks = parent.getSliceStatuses().stream().filter(Objects::nonNull).count(); - ListTasksResponse list = client().admin().cluster().prepareListTasks().setParentTaskId(taskToRethrottle).get(); + ListTasksResponse list = client().admin().cluster().prepareListTasks().setTargetParentTaskId(taskToRethrottle).get(); list.rethrowFailures("subtasks"); assertThat(finishedSubTasks + list.getTasks().size(), greaterThanOrEqualTo((long) numSlices)); assertThat(list.getTasks().size(), greaterThan(0)); @@ -192,7 +192,7 @@ private ListTasksResponse rethrottleTask(TaskId taskToRethrottle, float newReque assertBusy(() -> { try { - ListTasksResponse rethrottleResponse = rethrottle().setTaskId(taskToRethrottle) + ListTasksResponse rethrottleResponse = rethrottle().setTargetTaskId(taskToRethrottle) .setRequestsPerSecond(newRequestsPerSecond) .get(); rethrottleResponse.rethrowFailures("Rethrottle"); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java index 073c0a68a5400..cacc829654f63 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/RoundTripTests.java @@ -174,12 +174,12 @@ public void testRethrottleRequest() throws IOException { if (randomBoolean()) { request.setActions(randomFrom(UpdateByQueryAction.NAME, ReindexAction.NAME)); } else { - request.setTaskId(new TaskId(randomAlphaOfLength(5), randomLong())); + request.setTargetTaskId(new TaskId(randomAlphaOfLength(5), randomLong())); } RethrottleRequest tripped = new RethrottleRequest(toInputByteStream(request)); assertEquals(request.getRequestsPerSecond(), tripped.getRequestsPerSecond(), 0.00001); assertArrayEquals(request.getActions(), tripped.getActions()); - assertEquals(request.getTaskId(), tripped.getTaskId()); + assertEquals(request.getTargetTaskId(), tripped.getTargetTaskId()); } private StreamInput toInputByteStream(Writeable example) throws IOException { diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java index 1359dd394e51e..902a32ae75cbe 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/TransportRethrottleActionTests.java @@ -75,7 +75,7 @@ private void rethrottleTestCase( if (runningSlices > 0) { verify(client).execute(eq(RethrottleAction.INSTANCE), subRequest.capture(), subListener.capture()); - assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getParentTaskId()); + assertEquals(new TaskId(localNodeId, task.getId()), subRequest.getValue().getTargetParentTaskId()); assertEquals(newRequestsPerSecond / runningSlices, subRequest.getValue().getRequestsPerSecond(), 0.00001f); simulator.accept(subListener.getValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 1c646fb9beebd..4de462ee96a73 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -167,7 +167,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); if (randomBoolean()) { @@ -179,7 +179,7 @@ public void testBanOnlyNodesWithOutstandingDescendantTasks() throws Exception { .get() .getTasks(); for (TaskInfo subTask : randomSubsetOf(runningTasks)) { - client().admin().cluster().prepareCancelTasks().setTaskId(subTask.getTaskId()).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(subTask.getTaskId()).waitForCompletion(false).get(); } } try { @@ -217,13 +217,13 @@ public void testCancelTaskMultipleTimes() throws Exception { ActionFuture mainTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); allowPartialRequest(rootRequest); - CancelTasksResponse resp = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + CancelTasksResponse resp = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); assertThat(resp.getTaskFailures(), empty()); assertThat(resp.getNodeFailures(), empty()); ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(true) .execute(); assertFalse(cancelFuture.isDone()); @@ -234,7 +234,7 @@ public void testCancelTaskMultipleTimes() throws Exception { CancelTasksResponse cancelError = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(randomBoolean()) .get(); assertThat(cancelError.getNodeFailures(), hasSize(1)); @@ -255,7 +255,7 @@ public void testDoNotWaitForCompletion() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(taskId) + .setTargetTaskId(taskId) .waitForCompletion(waitForCompletion) .execute(); if (waitForCompletion) { @@ -274,7 +274,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws 
Exception { TestRequest rootRequest = generateTestRequest(nodes, 0, between(1, 3)); ActionFuture rootTaskFuture = client().execute(TransportTestAction.ACTION, rootRequest); TaskId taskId = getRootTaskId(rootRequest); - client().admin().cluster().prepareCancelTasks().setTaskId(taskId).waitForCompletion(false).get(); + client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).waitForCompletion(false).get(); DiscoveryNode nodeWithParentTask = nodes.stream().filter(n -> n.getId().equals(taskId.getNodeId())).findFirst().get(); TransportTestAction mainAction = internalCluster().getInstance(TransportTestAction.class, nodeWithParentTask.getName()); PlainActionFuture future = new PlainActionFuture<>(); @@ -323,7 +323,7 @@ public void testRemoveBanParentsOnDisconnect() throws Exception { ActionFuture cancelFuture = client().admin() .cluster() .prepareCancelTasks() - .setTaskId(rootTaskId) + .setTargetTaskId(rootTaskId) .waitForCompletion(true) .execute(); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index 8fada58de5c4a..685fae6114760 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -89,7 +89,11 @@ public void testPersistentActionFailure() throws Exception { logger.info("Failing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("fail").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("fail") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -255,7 +259,7 @@ public void testPersistentActionStatusUpdate() throws Exception { // Complete the running task and make sure it finishes properly assertThat( new TestTasksRequestBuilder(client()).setOperation("update_status") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.getTaskId()) .get() .getTasks() .size(), @@ -296,7 +300,11 @@ public void testPersistentActionStatusUpdate() throws Exception { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -327,7 +335,11 @@ public void testCreatePersistentTaskWithDuplicateId() throws Exception { logger.info("Completing the running task"); // Fail the running task and make sure it restarts properly assertThat( - new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId()).get().getTasks().size(), + new TestTasksRequestBuilder(client()).setOperation("finish") + .setTargetTaskId(firstRunningTask.getTaskId()) + .get() + .getTasks() + .size(), equalTo(1) ); @@ -435,7 +447,7 @@ public void testAbortLocally() throws Exception { assertThat( new TestTasksRequestBuilder(client()).setOperation("abort_locally") - .setTaskId(firstRunningTask.getTaskId()) + .setTargetTaskId(firstRunningTask.getTaskId()) .get() .getTasks() .size(), @@ -494,12 +506,15 @@ private 
void stopOrCancelTask(TaskId taskId) { if (randomBoolean()) { logger.info("Completing the running task"); // Complete the running task and make sure it finishes properly - assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat( + new TestTasksRequestBuilder(client()).setOperation("finish").setTargetTaskId(taskId).get().getTasks().size(), + equalTo(1) + ); } else { logger.info("Cancelling the running task"); // Cancel the running task and make sure it finishes properly - assertThat(client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks().size(), equalTo(1)); + assertThat(client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get().getTasks().size(), equalTo(1)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 39b7e5df5f319..33ddf58122a20 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -131,7 +131,11 @@ private void cancelSearch(String action) { TaskInfo searchTask = listTasksResponse.getTasks().get(0); logger.info("Cancelling search"); - CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setTaskId(searchTask.getTaskId()).get(); + CancelTasksResponse cancelTasksResponse = client().admin() + .cluster() + .prepareCancelTasks() + .setTargetTaskId(searchTask.getTaskId()) + .get(); assertThat(cancelTasksResponse.getTasks(), hasSize(1)); assertThat(cancelTasksResponse.getTasks().get(0).getTaskId(), equalTo(searchTask.getTaskId())); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index bb25fbe01771f..312c5ae220c1c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -213,7 +213,7 @@ public void testCancel() throws Exception { .filter(t -> t.getParentTaskId().isSet() == false) .findFirst() .get(); - final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTaskId(rootTask.getTaskId()); + final CancelTasksRequest cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTask.getTaskId()); cancelRequest.setWaitForCompletion(randomBoolean()); final ActionFuture cancelFuture = client().admin().cluster().cancelTasks(cancelRequest); assertBusy(() -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java index df0db7d81e94c..d3596f2a652bf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequest.java @@ -81,10 +81,10 @@ public String getDescription() { + reason + "], waitForCompletion[" + waitForCompletion - + "], taskId[" - + getTaskId() - + "], parentTaskId[" - + getParentTaskId() + + "], targetTaskId[" + + getTargetTaskId() + + "], targetParentTaskId[" + + getTargetParentTaskId() + "], nodes" + 
Arrays.toString(getNodes()) + ", actions" diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java index cebdb6266f253..339d3d2e17e75 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java @@ -60,21 +60,21 @@ protected CancelTasksResponse newResponse( } protected void processTasks(CancelTasksRequest request, Consumer operation) { - if (request.getTaskId().isSet()) { + if (request.getTargetTaskId().isSet()) { // we are only checking one task, we can optimize it - CancellableTask task = taskManager.getCancellableTask(request.getTaskId().getId()); + CancellableTask task = taskManager.getCancellableTask(request.getTargetTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept(task); } else { - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support this operation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] doesn't support this operation"); } } else { - if (taskManager.getTask(request.getTaskId().getId()) != null) { + if (taskManager.getTask(request.getTargetTaskId().getId()) != null) { // The task exists, but doesn't support cancellation - throw new IllegalArgumentException("task [" + request.getTaskId() + "] doesn't support cancellation"); + throw new IllegalArgumentException("task [" + request.getTargetTaskId() + "] doesn't support cancellation"); } else { - throw new ResourceNotFoundException("task [{}] is not found", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] is not found", request.getTargetTaskId()); } } } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 00c74fcad4d26..e00fa8155d1ef 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -668,7 +668,7 @@ private boolean assertNodePresent() { } public void cancelSearchTask(SearchTask task, String reason) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(new TaskId(client.getLocalNodeId(), task.getId())) + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(new TaskId(client.getLocalNodeId(), task.getId())) .setReason("Fatal failure during search: " + reason); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, GetTaskAction.TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java index ad3e95836572f..1f71e4d1f6ff6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksRequest.java @@ -38,9 +38,9 @@ public class BaseTasksRequest> extends private String[] actions = ALL_ACTIONS; - private TaskId parentTaskId = TaskId.EMPTY_TASK_ID; + private TaskId targetParentTaskId = TaskId.EMPTY_TASK_ID; - private TaskId taskId = 
TaskId.EMPTY_TASK_ID; + private TaskId targetTaskId = TaskId.EMPTY_TASK_ID; // NOTE: This constructor is only needed, because the setters in this class, // otherwise it can be removed and above fields can be made final. @@ -48,8 +48,8 @@ public BaseTasksRequest() {} protected BaseTasksRequest(StreamInput in) throws IOException { super(in); - taskId = TaskId.readFromStream(in); - parentTaskId = TaskId.readFromStream(in); + targetTaskId = TaskId.readFromStream(in); + targetParentTaskId = TaskId.readFromStream(in); nodes = in.readStringArray(); actions = in.readStringArray(); timeout = in.readOptionalTimeValue(); @@ -58,8 +58,8 @@ protected BaseTasksRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - taskId.writeTo(out); - parentTaskId.writeTo(out); + targetTaskId.writeTo(out); + targetParentTaskId.writeTo(out); out.writeStringArrayNullable(nodes); out.writeStringArrayNullable(actions); out.writeOptionalTimeValue(timeout); @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; - if (taskId.isSet() && nodes.length > 0) { + if (targetTaskId.isSet() && nodes.length > 0) { validationException = addValidationError("task id cannot be used together with node ids", validationException); } return validationException; @@ -105,29 +105,61 @@ public final Request setNodes(String... nodes) { * * By default tasks with any ids are returned. */ - public TaskId getTaskId() { - return taskId; + public TaskId getTargetTaskId() { + return targetTaskId; } @SuppressWarnings("unchecked") - public final Request setTaskId(TaskId taskId) { - this.taskId = taskId; + public final Request setTargetTaskId(TaskId targetTaskId) { + this.targetTaskId = targetTaskId; return (Request) this; } + /** + * @deprecated Use {@link #getTargetTaskId()} + */ + @Deprecated + public TaskId getTaskId() { + return getTargetTaskId(); + } + + /** + * @deprecated Use {@link #setTargetTaskId(TaskId)} + */ + @Deprecated + public final Request setTaskId(TaskId taskId) { + return setTargetTaskId(taskId); + } + /** * Returns the parent task id that tasks should be filtered by */ - public TaskId getParentTaskId() { - return parentTaskId; + public TaskId getTargetParentTaskId() { + return targetParentTaskId; } @SuppressWarnings("unchecked") - public Request setParentTaskId(TaskId parentTaskId) { - this.parentTaskId = parentTaskId; + public Request setTargetParentTaskId(TaskId targetParentTaskId) { + this.targetParentTaskId = targetParentTaskId; return (Request) this; } + /** + * @deprecated Use {@link #getTargetParentTaskId()} + */ + @Deprecated + public TaskId getParentTaskId() { + return getTargetParentTaskId(); + } + + /** + * @deprecated Use {@link #setTargetParentTaskId(TaskId)} + */ + @Deprecated + public Request setParentTaskId(TaskId parentTaskId) { + return setTargetParentTaskId(parentTaskId); + } + public TimeValue getTimeout() { return this.timeout; } @@ -148,13 +180,13 @@ public boolean match(Task task) { if (CollectionUtils.isEmpty(getActions()) == false && Regex.simpleMatch(getActions(), task.getAction()) == false) { return false; } - if (getTaskId().isSet()) { - if (getTaskId().getId() != task.getId()) { + if (getTargetTaskId().isSet()) { + if (getTargetTaskId().getId() != task.getId()) { return false; } } - if (parentTaskId.isSet()) { - if (parentTaskId.equals(task.getParentTaskId()) == false) { + if 
(targetParentTaskId.isSet()) { + if (targetParentTaskId.equals(task.getParentTaskId()) == false) { return false; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java index e283ecc34e4eb..c827e3b55deee 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TasksRequestBuilder.java @@ -29,8 +29,8 @@ protected TasksRequestBuilder(ElasticsearchClient client, ActionType a * Set the task to lookup. */ @SuppressWarnings("unchecked") - public final RequestBuilder setTaskId(TaskId taskId) { - request.setTaskId(taskId); + public final RequestBuilder setTargetTaskId(TaskId taskId) { + request.setTargetTaskId(taskId); return (RequestBuilder) this; } @@ -56,8 +56,8 @@ public final RequestBuilder setTimeout(TimeValue timeout) { * Match all children of the provided task. */ @SuppressWarnings("unchecked") - public final RequestBuilder setParentTaskId(TaskId taskId) { - request.setParentTaskId(taskId); + public final RequestBuilder setTargetParentTaskId(TaskId taskId) { + request.setTargetParentTaskId(taskId); return (RequestBuilder) this; } } diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index a3550a7d9f93f..445274896ada3 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -146,8 +146,8 @@ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) { } protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) { - if (request.getTaskId().isSet()) { - return new String[] { request.getTaskId().getNodeId() }; + if (request.getTargetTaskId().isSet()) { + return new String[] { request.getTargetTaskId().getNodeId() }; } else { return clusterState.nodes().resolveNodes(request.getNodes()); } @@ -155,17 +155,17 @@ protected String[] resolveNodes(TasksRequest request, ClusterState clusterState) @SuppressWarnings("unchecked") protected void processTasks(TasksRequest request, Consumer operation) { - if (request.getTaskId().isSet()) { + if (request.getTargetTaskId().isSet()) { // we are only checking one task, we can optimize it - Task task = taskManager.getTask(request.getTaskId().getId()); + Task task = taskManager.getTask(request.getTargetTaskId().getId()); if (task != null) { if (request.match(task)) { operation.accept((OperationTask) task); } else { - throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.getTargetTaskId()); } } else { - throw new ResourceNotFoundException("task [{}] is missing", request.getTaskId()); + throw new ResourceNotFoundException("task [{}] is missing", request.getTargetTaskId()); } } else { for (Task task : taskManager.getTasks().values()) { diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index aa4866294f5a3..5da8f570eacfc 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ 
b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -92,7 +92,7 @@ public void sendCompletionRequest( */ void sendCancelRequest(final long taskId, final String reason, final ActionListener listener) { CancelTasksRequest request = new CancelTasksRequest(); - request.setTaskId(new TaskId(clusterService.localNode().getId(), taskId)); + request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId)); request.setReason(reason); try { client.admin().cluster().cancelTasks(request, listener); diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java b/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java index deb76bfdd5271..70217c057a7a7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestCancellableNodeClient.java @@ -104,7 +104,7 @@ public void onFailure(Exception e) { } private void cancelTask(TaskId taskId) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(taskId).setReason("http channel [" + httpChannel + "] closed"); + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(taskId).setReason("http channel [" + httpChannel + "] closed"); // force the origin to execute the cancellation as a system user new OriginSettingClient(client, TASKS_ORIGIN).admin().cluster().cancelTasks(req, ActionListener.wrap(() -> {})); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java index 5a0100a072ce6..d8bba544eb395 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCancelTasksAction.java @@ -49,10 +49,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String groupBy = request.param("group_by", "nodes"); CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); - cancelTasksRequest.setTaskId(taskId); + cancelTasksRequest.setTargetTaskId(taskId); cancelTasksRequest.setNodes(nodesIds); cancelTasksRequest.setActions(actions); - cancelTasksRequest.setParentTaskId(parentTaskId); + cancelTasksRequest.setTargetParentTaskId(parentTaskId); cancelTasksRequest.setWaitForCompletion(request.paramAsBoolean("wait_for_completion", cancelTasksRequest.waitForCompletion())); return channel -> client.admin() .cluster() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java index 2fd080204c1cb..20a7687e0fae1 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java @@ -69,7 +69,7 @@ public static ListTasksRequest generateListTasksRequest(RestRequest request) { listTasksRequest.setNodes(nodes); listTasksRequest.setDetailed(detailed); listTasksRequest.setActions(actions); - listTasksRequest.setParentTaskId(parentTaskId); + listTasksRequest.setTargetParentTaskId(parentTaskId); listTasksRequest.setWaitForCompletion(waitForCompletion); listTasksRequest.setTimeout(timeout); return listTasksRequest; diff --git 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index ad7166ef9b044..aa86f3d0a0414 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -282,7 +282,7 @@ public void onFailure(Exception e) { // Cancel main task CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, @@ -316,7 +316,7 @@ public void onFailure(Exception e) { // Make sure that tasks are no longer running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) ); assertEquals(0, listTasksResponse.getTasks().size()); @@ -354,7 +354,7 @@ public void onFailure(Exception e) { // Cancel all child tasks without cancelling the main task, which should quit on its own CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetParentTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random node CancelTasksResponse response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportCancelTasksAction, @@ -373,7 +373,7 @@ public void onFailure(Exception e) { // Make sure that main task is no longer running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())) ); assertEquals(0, listTasksResponse.getTasks().size()); }); @@ -460,7 +460,7 @@ public void onFailure(Exception e) { // Make sure that tasks are running ListTasksResponse listTasksResponse = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId())) + new ListTasksRequest().setTargetParentTaskId(new TaskId(mainNode, mainTask.getId())) ); assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size())); @@ -474,7 +474,7 @@ public void onFailure(Exception e) { // Simulate issuing cancel request on the node that is about to leave the cluster CancelTasksRequest request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId())); // And send the cancellation request to a random 
node CancelTasksResponse response = ActionTestUtils.executeBlocking(testNodes[0].transportCancelTasksAction, request); logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster"); @@ -502,7 +502,7 @@ public void onFailure(Exception e) { // Make sure that tasks are no longer running ListTasksResponse listTasksResponse1 = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(1, testNodes.length - 1)].transportListTasksAction, - new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId())) + new ListTasksRequest().setTargetTaskId(new TaskId(mainNode, mainTask.getId())) ); assertEquals(0, listTasksResponse1.getTasks().size()); }); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index a87ed2331de9f..d982e83487cd7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -428,7 +428,7 @@ public void testFindChildTasks() throws Exception { // Find tasks with common parent listTasksRequest = new ListTasksRequest(); - listTasksRequest.setParentTaskId(new TaskId(parentNode, parentTaskId)); + listTasksRequest.setTargetParentTaskId(new TaskId(parentNode, parentTaskId)); response = ActionTestUtils.executeBlocking(testNode.transportListTasksAction, listTasksRequest); assertEquals(testNodes.length, response.getTasks().size()); for (TaskInfo task : response.getTasks()) { @@ -506,7 +506,7 @@ public void testCancellingTasksThatDontSupportCancellation() throws Exception { // Try to cancel main task using id request = new CancelTasksRequest(); request.setReason("Testing Cancellation"); - request.setTaskId(new TaskId(testNodes[0].getNodeId(), task.getId())); + request.setTargetTaskId(new TaskId(testNodes[0].getNodeId(), task.getId())); response = ActionTestUtils.executeBlocking( testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction, request diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java index 73916abfcdf1a..df0f1b8b99a11 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksRequestTests.java @@ -20,11 +20,11 @@ public void testGetDescription() { CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); cancelTasksRequest.setActions("action1", "action2"); cancelTasksRequest.setNodes("node1", "node2"); - cancelTasksRequest.setTaskId(new TaskId("node1", 1)); - cancelTasksRequest.setParentTaskId(new TaskId("node1", 0)); + cancelTasksRequest.setTargetTaskId(new TaskId("node1", 1)); + cancelTasksRequest.setTargetParentTaskId(new TaskId("node1", 0)); assertEquals( - "reason[by user request], waitForCompletion[false], taskId[node1:1], " - + "parentTaskId[node1:0], nodes[node1, node2], actions[action1, action2]", + "reason[by user request], waitForCompletion[false], targetTaskId[node1:1], " + + "targetParentTaskId[node1:0], nodes[node1, node2], actions[action1, action2]", cancelTasksRequest.getDescription() ); Task task = cancelTasksRequest.createTask(1, 
"type", "action", null, Collections.emptyMap()); diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java index a94c03adade83..f50d2fc1f17b3 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java @@ -170,7 +170,7 @@ public Task exe switch (action.name()) { case CancelTasksAction.NAME: CancelTasksRequest cancelTasksRequest = (CancelTasksRequest) request; - assertTrue("tried to cancel the same task more than once", cancelledTasks.add(cancelTasksRequest.getTaskId())); + assertTrue("tried to cancel the same task more than once", cancelledTasks.add(cancelTasksRequest.getTargetTaskId())); Task task = request.createTask(counter.getAndIncrement(), "cancel_task", action.name(), null, Collections.emptyMap()); if (randomBoolean()) { listener.onResponse(null); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index d7027859a9107..24a92005609b5 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -146,7 +146,7 @@ public void cancelTask(TaskManager taskManager, Runnable runnable, String reason */ public void cancelTask(Runnable runnable, String reason) { if (isCancelled() == false && isCancelling.compareAndSet(false, true)) { - CancelTasksRequest req = new CancelTasksRequest().setTaskId(searchId.getTaskId()).setReason(reason); + CancelTasksRequest req = new CancelTasksRequest().setTargetTaskId(searchId.getTaskId()).setReason(reason); client.admin().cluster().cancelTasks(req, new ActionListener<>() { @Override public void onResponse(CancelTasksResponse cancelTasksResponse) { diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java index 0c3361baf843b..ae27e100b0a6d 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AbstractEqlBlockingIntegTestCase.java @@ -258,7 +258,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get(); + CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).getAction(), equalTo(action)); logger.trace("Task is cancelled " + taskId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index d0111ab260480..da59bf43aa244 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -361,7 +361,7 @@ private void cancelResetTaskIfExists(String jobId, ActionListener liste CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); cancelTasksRequest.setReason("deleting job"); cancelTasksRequest.setActions(ResetJobAction.NAME); - cancelTasksRequest.setTaskId(job.getBlocked().getTaskId()); + cancelTasksRequest.setTargetTaskId(job.getBlocked().getTaskId()); executeAsyncWithOrigin( client, ML_ORIGIN, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index a93ff43a4dfa3..2d3f9e724053c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -267,7 +267,7 @@ public void cancel(String reason, TimeValue timeout) { LOGGER.debug("[{}] Cancelling reindex task [{}]", config.getId(), reindexTaskId); CancelTasksRequest cancelReindex = new CancelTasksRequest(); - cancelReindex.setTaskId(reindexTaskId); + cancelReindex.setTargetTaskId(reindexTaskId); cancelReindex.setReason(reason); cancelReindex.setTimeout(timeout); diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java index 42ce6a30877b4..8a3f0e0c21b97 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/AbstractSqlBlockingIntegTestCase.java @@ -271,7 +271,7 @@ protected TaskId cancelTaskWithXOpaqueId(String id, String action) { TaskId taskId = findTaskWithXOpaqueId(id, action); assertNotNull(taskId); logger.trace("Cancelling task " + taskId); - CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTaskId(taskId).get(); + CancelTasksResponse response = client().admin().cluster().prepareCancelTasks().setTargetTaskId(taskId).get(); assertThat(response.getTasks(), hasSize(1)); assertThat(response.getTasks().get(0).getAction(), equalTo(action)); logger.trace("Task is cancelled " + taskId); From d1af86cfdd770b6079d51b0421bfd377ae0c7d69 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 24 Nov 2021 10:09:45 -0800 Subject: [PATCH 39/88] [DOCS] Fixes start and stop trained model deployment APIs (#80978) --- .../apis/get-dfanalytics-stats.asciidoc | 2 +- .../apis/get-dfanalytics.asciidoc | 2 +- .../start-trained-model-deployment.asciidoc | 40 ++++++++++--------- .../apis/stop-dfanalytics.asciidoc | 2 +- .../stop-trained-model-deployment.asciidoc | 14 ++++--- docs/reference/ml/ml-shared.asciidoc | 33 ++++++++++----- 6 files changed, 56 insertions(+), 37 deletions(-) diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc index 5efa48b559e9d..b18899cc86558 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics-stats.asciidoc @@ -43,7 +43,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-data-frame-analytics-def `allow_no_match`:: (Optional, Boolean) 
-include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `from`:: (Optional, integer) diff --git a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc index 21228d20ced4e..c225676b8ebc9 100644 --- a/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/get-dfanalytics.asciidoc @@ -54,7 +54,7 @@ You can get information for all {dfanalytics-jobs} by using _all, by specifying `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `from`:: (Optional, integer) diff --git a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc index a05e01d26c1ca..9fbec6d026e32 100644 --- a/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/start-trained-model-deployment.asciidoc @@ -6,6 +6,8 @@ Start trained model deployment ++++ +experimental::[] + Starts a new trained model deployment. [[start-trained-model-deployment-request]] @@ -34,25 +36,6 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] [[start-trained-model-deployment-query-params]] == {api-query-parms-title} -`timeout`:: -(Optional, time) -Controls the amount of time to wait for the model to deploy. Defaults -to 20 seconds. - -`wait_for`:: -(Optional, string) -Specifies the allocation status to wait for before returning. Defaults to -`started`. The value `starting` indicates deployment is starting but not yet on -any node. The value `started` indicates the model has started on at least one -node. The value `fully_allocated` indicates the deployment has started on all -valid nodes. - -`model_threads`:: -(Optional, integer) -Indicates how many threads are used when sending inference requests to -the model. Increasing this value generally increases the throughput. Defaults to -1. - `inference_threads`:: (Optional, integer) Sets the number of threads used by the inference process. This generally increases @@ -61,12 +44,31 @@ greater than the number of available CPU cores on the machine does not increase inference speed. Defaults to 1. +`model_threads`:: +(Optional, integer) +Indicates how many threads are used when sending inference requests to +the model. Increasing this value generally increases the throughput. Defaults to +1. + `queue_capacity`:: (Optional, integer) Controls how many inference requests are allowed in the queue at a time. Once the number of requests exceeds this value, new requests are rejected with a 429 error. Defaults to 1024. +`timeout`:: +(Optional, time) +Controls the amount of time to wait for the model to deploy. Defaults +to 20 seconds. + +`wait_for`:: +(Optional, string) +Specifies the allocation status to wait for before returning. Defaults to +`started`. The value `starting` indicates deployment is starting but not yet on +any node. The value `started` indicates the model has started on at least one +node. The value `fully_allocated` indicates the deployment has started on all +valid nodes. 
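To make the reordered query parameters above easier to scan, here is a minimal sketch of a start-deployment call that combines them, using only Java's built-in HTTP client. The host, model id, and parameter values are placeholders rather than anything taken from this patch, and the `_start` path is inferred by analogy with the `_stop` endpoint documented later in this series.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class StartDeploymentSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder host and model id; the query string mirrors the parameters
        // documented above (inference_threads, model_threads, queue_capacity,
        // timeout, wait_for).
        String url = "http://localhost:9200/_ml/trained_models/my-model/deployment/_start"
            + "?inference_threads=2&model_threads=1&queue_capacity=1024&timeout=20s&wait_for=started";
        HttpRequest request = HttpRequest.newBuilder(URI.create(url))
            .POST(HttpRequest.BodyPublishers.noBody())
            .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
            .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}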
+ [[start-trained-model-deployment-example]] == {api-examples-title} diff --git a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc index af58b2c2f6b72..3ac7be860fd1c 100644 --- a/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-dfanalytics.asciidoc @@ -49,7 +49,7 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=job-id-data-frame-analytics-def `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-dfa-jobs] `force`:: diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc index eb58a9baf8d8c..a486ee37bb239 100644 --- a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -8,15 +8,18 @@ experimental::[] +Stops a trained model deployment. + [[stop-trained-model-deployment-request]] == {api-request-title} `POST _ml/trained_models//deployment/_stop` -//// [[stop-trained-model-deployment-prereq]] == {api-prereq-title} -//// + +Requires the `manage_ml` cluster privilege. This privilege is included in the +`machine_learning_admin` built-in role. //// [[stop-trained-model-deployment-desc]] @@ -36,12 +39,11 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=model-id] `allow_no_match`:: (Optional, Boolean) -include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match] - +include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-deployments] `force`:: - (Optional, Boolean) If true, the deployment is stopped even if it is referenced by - ingest pipelines. +(Optional, Boolean) If true, the deployment is stopped even if it is referenced +by ingest pipelines. //// [role="child_attributes"] diff --git a/docs/reference/ml/ml-shared.asciidoc b/docs/reference/ml/ml-shared.asciidoc index 3a11ca6118299..97b9a42c49582 100644 --- a/docs/reference/ml/ml-shared.asciidoc +++ b/docs/reference/ml/ml-shared.asciidoc @@ -32,22 +32,22 @@ are no matches or only partial matches. -- end::allow-no-match-datafeeds[] -tag::allow-no-match-jobs[] +tag::allow-no-match-deployments[] Specifies what to do when the request: + -- -* Contains wildcard expressions and there are no jobs that match. +* Contains wildcard expressions and there are no deployments that match. * Contains the `_all` string or no identifiers and there are no matches. * Contains wildcard expressions and there are only partial matches. -The default value is `true`, which returns an empty `jobs` array -when there are no matches and the subset of results when there are partial -matches. If this parameter is `false`, the request returns a `404` status code -when there are no matches or only partial matches. +The default value is `true`, which returns an empty array when there are no +matches and the subset of results when there are partial matches. If this +parameter is `false`, the request returns a `404` status code when there are no +matches or only partial matches. -- -end::allow-no-match-jobs[] +end::allow-no-match-deployments[] -tag::allow-no-match[] +tag::allow-no-match-dfa-jobs[] Specifies what to do when the request: + -- @@ -60,7 +60,22 @@ when there are no matches and the subset of results when there are partial matches. 
If this parameter is `false`, the request returns a `404` status code when there are no matches or only partial matches. -- -end::allow-no-match[] +end::allow-no-match-dfa-jobs[] + +tag::allow-no-match-jobs[] +Specifies what to do when the request: ++ +-- +* Contains wildcard expressions and there are no jobs that match. +* Contains the `_all` string or no identifiers and there are no matches. +* Contains wildcard expressions and there are only partial matches. + +The default value is `true`, which returns an empty `jobs` array +when there are no matches and the subset of results when there are partial +matches. If this parameter is `false`, the request returns a `404` status code +when there are no matches or only partial matches. +-- +end::allow-no-match-jobs[] tag::allow-no-match-models[] Specifies what to do when the request: From 2a30dfe4d213769e5c4982d201117562377e0e8d Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Wed, 24 Nov 2021 14:34:25 -0500 Subject: [PATCH 40/88] [DOCS] Fix `type` response values for index recovery API (#81000) We updated the `type` response values in https://github.com/elastic/elasticsearch/pull/19516. This updates the docs with the correct values. Closes https://github.com/elastic/elasticsearch/issues/80264 Co-authored-by: David Turner --- docs/reference/indices/recovery.asciidoc | 42 +++++++++++++----------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/docs/reference/indices/recovery.asciidoc b/docs/reference/indices/recovery.asciidoc index 8564b5e2b46ed..4a61155175d4d 100644 --- a/docs/reference/indices/recovery.asciidoc +++ b/docs/reference/indices/recovery.asciidoc @@ -44,12 +44,12 @@ the replica shard is available for search. Recovery automatically occurs during the following processes: -* Node startup or failure. This type of recovery is called a local store - recovery. +* Node startup. This type of recovery is called a local store recovery. * Primary shard replication. * Relocation of a shard to a different node in the same cluster. -* <>. - +* <> operation. +* <>, <>, or +<> operation. // end::shard-recovery-desc[] [[index-recovery-api-path-params]] @@ -80,26 +80,28 @@ ID of the shard. `type`:: + -- -(String) -Recovery type. -Returned values include: +(String) Recovery source for the shard. Returned values include: -`STORE`:: -The recovery is related to -a node startup or failure. -This type of recovery is called a local store recovery. +`EMPTY_STORE`:: +An empty store. Indicates a new primary shard or the forced allocation of an +empty primary shard using the <>. -`SNAPSHOT`:: -The recovery is related to -a <>. +`EXISTING_STORE`:: +The store of an existing primary shard. Indicates recovery is related +to node startup or the allocation of an existing primary shard. -`REPLICA`:: -The recovery is related to a primary shard replication. +`LOCAL_SHARDS`:: +Shards of another index on the same node. Indicates recovery is related to a +<>, <>, or +<> operation. -`RELOCATING`:: -The recovery is related to -the relocation of a shard -to a different node in the same cluster. +`PEER`:: +A primary shard on another node. Indicates recovery is related to shard +replication. + +`SNAPSHOT`:: +A snapshot. Indicates recovery is related to a +<> operation. 
-- `STAGE`:: From 4ff1962ecbc83112aa5db93adbd1963a64cb2bf7 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 24 Nov 2021 13:02:00 -0800 Subject: [PATCH 41/88] Support additional version schemes in relaxed mode (#81010) --- .../src/main/java/org/elasticsearch/gradle/Version.java | 4 +++- .../src/test/java/org/elasticsearch/gradle/VersionTests.java | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java index a86e16ad740fd..dfa8be295a8f7 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/Version.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/Version.java @@ -39,7 +39,9 @@ public enum Mode { private static final Pattern pattern = Pattern.compile("(\\d+)\\.(\\d+)\\.(\\d+)(?:-(alpha\\d+|beta\\d+|rc\\d+|SNAPSHOT))?"); - private static final Pattern relaxedPattern = Pattern.compile("v?(\\d+)\\.(\\d+)\\.(\\d+)(?:-([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?"); + private static final Pattern relaxedPattern = Pattern.compile( + "v?(\\d+)\\.(\\d+)\\.(\\d+)(?:[\\-+]+([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?" + ); public Version(int major, int minor, int revision) { this(major, minor, revision, null); } diff --git a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java index 2dae3d9f70900..97eb21b814d70 100644 --- a/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/build-tools/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -41,6 +41,7 @@ public void testRelaxedVersionParsing() { assertVersionEquals("6.1.2-foo", 6, 1, 2, Version.Mode.RELAXED); assertVersionEquals("6.1.2-foo-bar", 6, 1, 2, Version.Mode.RELAXED); assertVersionEquals("16.01.22", 16, 1, 22, Version.Mode.RELAXED); + assertVersionEquals("20.10.10+dfsg1", 20, 10, 10, Version.Mode.RELAXED); } public void testCompareWithStringVersions() { From b4b489f52e7e0c901df472a2bb57fca6b596f8a0 Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Wed, 24 Nov 2021 18:15:32 -0700 Subject: [PATCH 42/88] Fix RefreshListenerTests testDisallowAddListeners (#80575) A recent change to RefreshListeners introduced new sequence number listener functionality. This change broke a test when the max allowed listeners are randomly set to 2. This commit resolves it by checking whether a new listener is allowed and asserting appropriately. Closes #79689.
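Before the PATCH 42 diff below, a brief aside on the relaxed version pattern introduced in PATCH 41 above. The following self-contained sketch shows what the widened separator class admits; the pattern string is copied verbatim from the Version.java change, while the class name and sample inputs are illustrative only.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RelaxedVersionSketch {
    // Verbatim from the Version.java change above: the qualifier separator may now
    // be '-' or '+', so Debian-style suffixes such as "+dfsg1" parse in relaxed mode.
    private static final Pattern RELAXED = Pattern.compile(
        "v?(\\d+)\\.(\\d+)\\.(\\d+)(?:[\\-+]+([a-zA-Z0-9_]+(?:-[a-zA-Z0-9]+)*))?"
    );

    public static void main(String[] args) {
        for (String candidate : new String[] { "6.1.2-foo-bar", "20.10.10+dfsg1", "v1.2.3" }) {
            Matcher m = RELAXED.matcher(candidate);
            if (m.matches()) {
                System.out.println(candidate + " -> " + m.group(1) + "." + m.group(2) + "." + m.group(3));
            } else {
                System.out.println(candidate + " -> no match");
            }
        }
    }
}

The widened character class `[\-+]+` is exactly what admits the `20.10.10+dfsg1` style version exercised by the new test case.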
--- .../index/shard/RefreshListenersTests.java | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 0d6d90b0f246a..7ff97e84a9907 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -17,7 +17,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.Directory; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; @@ -87,7 +86,6 @@ public class RefreshListenersTests extends ESTestCase { private volatile int maxListeners; private ThreadPool threadPool; private Store store; - private MeanMetric refreshMetric; @Before public void setupListeners() throws Exception { @@ -95,18 +93,16 @@ public void setupListeners() throws Exception { maxListeners = randomIntBetween(2, 1000); // Now setup the InternalEngine which is much more complicated because we aren't mocking anything threadPool = new TestThreadPool(getTestName()); - refreshMetric = new MeanMetric(); listeners = new RefreshListeners( () -> maxListeners, () -> engine.refresh("too-many-listeners"), logger, threadPool.getThreadContext(), - refreshMetric + new MeanMetric() ); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); ShardId shardId = new ShardId(new Index("index", "_na_"), 1); - String allocationId = UUIDs.randomBase64UUID(random()); Directory directory = newDirectory(); store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); IndexWriterConfig iwc = newIndexWriterConfig(); @@ -456,7 +452,6 @@ public void testLotsOfThreads() throws Exception { refresher.cancel(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79689") public void testDisallowAddListeners() throws Exception { assertEquals(0, listeners.pendingCount()); TestLocationListener listener = new TestLocationListener(); @@ -503,8 +498,21 @@ public void testDisallowAddListeners() throws Exception { } assertFalse(listeners.addOrNotify(index("1").getTranslogLocation(), new TestLocationListener())); - assertFalse(listeners.addOrNotify(index("1").getSeqNo(), new TestSeqNoListener())); - assertEquals(3, listeners.pendingCount()); + final int expectedPending; + if (listeners.pendingCount() == maxListeners) { + // Rejected + TestSeqNoListener rejected = new TestSeqNoListener(); + assertTrue(listeners.addOrNotify(index("1").getSeqNo(), rejected)); + assertNotNull(rejected.error); + expectedPending = 2; + } else { + TestSeqNoListener acceptedListener = new TestSeqNoListener(); + assertFalse(listeners.addOrNotify(index("1").getSeqNo(), acceptedListener)); + assertFalse(acceptedListener.isDone.get()); + assertNull(acceptedListener.error); + expectedPending = 3; + } + assertEquals(expectedPending, listeners.pendingCount()); } public void testSequenceNumberMustBeIssued() throws Exception { From b83b08f9f5f19b3cca87c72e302c0039420ac47e Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 25 Nov 2021 08:39:05 +0000 Subject: [PATCH 43/88] Throw NoSeedNodeLeftException on proxy failure (#80961) Today we throw an `IllegalStateException` if we can't form any connections 
to a remote cluster in proxy mode, which is typically treated as a non-retryable error. However in sniff mode we throw a `NoSeedNodeLeftException` which does trigger retries. Since connection failures are often transient things (in either mode), this commit moves proxy mode to use a retryable `NoSeedNodeLeftException` in this case too. Closes #80898 --- .../transport/NoSeedNodeLeftException.java | 14 +++++++++++--- .../transport/ProxyConnectionStrategy.java | 4 +--- .../transport/SniffConnectionStrategy.java | 2 +- .../transport/ProxyConnectionStrategyTests.java | 8 +++++++- .../xpack/ccr/action/ShardFollowNodeTask.java | 11 ++++------- 5 files changed, 24 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java b/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java index 6b727b985abc4..ef5e014b63c36 100644 --- a/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java +++ b/server/src/main/java/org/elasticsearch/transport/NoSeedNodeLeftException.java @@ -14,12 +14,20 @@ import java.io.IOException; /** - * Thrown after failed to connect to all seed nodes of the remote cluster. + * Thrown after completely failing to connect to any node of the remote cluster. */ public class NoSeedNodeLeftException extends ElasticsearchException { - public NoSeedNodeLeftException(String clusterName) { - super("no seed node left for cluster: [" + clusterName + "]"); + public NoSeedNodeLeftException(String message) { + super(message); + } + + NoSeedNodeLeftException(RemoteConnectionStrategy.ConnectionStrategy connectionStrategy, String clusterName) { + super( + connectionStrategy == RemoteConnectionStrategy.ConnectionStrategy.SNIFF + ? "no seed node left for cluster: [" + clusterName + "]" + : "Unable to open any proxy connections to cluster [" + clusterName + "]" + ); } public NoSeedNodeLeftException(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index e878796e1fb69..4129f39eb5663 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -287,9 +287,7 @@ public void onFailure(Exception e) { } else { int openConnections = connectionManager.size(); if (openConnections == 0) { - finished.onFailure( - new IllegalStateException("Unable to open any proxy connections to remote cluster [" + clusterAlias + "]") - ); + finished.onFailure(new NoSeedNodeLeftException(strategyType(), clusterAlias)); } else { logger.debug( "unable to open maximum number of connections [remote cluster: {}, opened: {}, maximum: {}]", diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 71cf8807b8a05..df69e3d26d809 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -339,7 +339,7 @@ private void collectRemoteNodes(Iterator> seedNodesSuppl onFailure.accept(e); }); } else { - listener.onFailure(new NoSeedNodeLeftException(clusterAlias)); + listener.onFailure(new NoSeedNodeLeftException(strategyType(), clusterAlias)); } } diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java 
b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 41538c1bb20dc..3c2392eeab087 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -34,6 +34,9 @@ import java.util.function.Supplier; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; + public class ProxyConnectionStrategyTests extends ESTestCase { private final String clusterAlias = "cluster-alias"; @@ -202,7 +205,10 @@ public void testConnectFailsWithIncompatibleNodes() { PlainActionFuture connectFuture = PlainActionFuture.newFuture(); strategy.connect(connectFuture); - expectThrows(Exception.class, connectFuture::actionGet); + assertThat( + expectThrows(NoSeedNodeLeftException.class, connectFuture::actionGet).getMessage(), + allOf(containsString("Unable to open any proxy connections"), containsString('[' + clusterAlias + ']')) + ); assertFalse(connectionManager.getAllConnectedNodes().stream().anyMatch(n -> n.getAddress().equals(address1))); assertEquals(0, connectionManager.size()); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 2e502e30f53f3..299940ab0894b 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -632,13 +632,10 @@ static boolean shouldRetry(final Exception e) { || actual instanceof NoShardAvailableActionException || actual instanceof UnavailableShardsException || actual instanceof AlreadyClosedException - || actual instanceof ElasticsearchSecurityException - || // If user does not have sufficient privileges - actual instanceof ClusterBlockException - || // If leader index is closed or no elected master - actual instanceof IndexClosedException - || // If follow index is closed - actual instanceof ConnectTransportException + || actual instanceof ElasticsearchSecurityException // If user does not have sufficient privileges + || actual instanceof ClusterBlockException // If leader index is closed or no elected master + || actual instanceof IndexClosedException // If follow index is closed + || actual instanceof ConnectTransportException || actual instanceof NodeClosedException || actual instanceof NoSuchRemoteClusterException || actual instanceof NoSeedNodeLeftException From daf37b484468cd0edf2ff4186d87100bef52917e Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 09:58:18 +0100 Subject: [PATCH 44/88] Cleanup bulk api logic (#80987) * Always check whether it is prohibited to use custom routing on a data stream. * Always invoke prohibitAppendWritesInBackingIndices(...), but in the method check whether the operation is of type index or create. Follow-up from #80624. 
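The early-bail refactor described above can be hard to see amid the diff noise that follows, so here is a self-contained sketch of its shape; the enum and method names below are illustrative stand-ins, not the actual Elasticsearch types.

enum OpType { CREATE, INDEX, UPDATE, DELETE }

final class BulkGuardSketch {
    // The op-type guard now lives inside the method itself, so every call site can
    // invoke it unconditionally.
    static void prohibitAppendWrites(OpType opType) {
        if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) {
            return; // updates and deletes may legitimately target a backing index
        }
        // ... append-only checks against data stream backing indices would follow ...
    }

    static void prohibitCustomRouting(OpType opType) {
        // Stand-in for the custom-routing check that is now also always invoked.
    }

    static void handleRequest(OpType opType) {
        // Before: callers wrapped these calls in their own CREATE/INDEX check.
        // After: both checks are always invoked and guard themselves.
        prohibitCustomRouting(opType);
        prohibitAppendWrites(opType);
    }
}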
--- .../action/bulk/TransportBulkAction.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 851a45af20ec3..4c02c5969e8d1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -348,6 +348,11 @@ public void onRejection(Exception rejectedException) { } static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest, Metadata metadata) { + DocWriteRequest.OpType opType = writeRequest.opType(); + if ((opType == OpType.CREATE || opType == OpType.INDEX) == false) { + // op type not create or index, then bail early + return; + } IndexAbstraction indexAbstraction = metadata.getIndicesLookup().get(writeRequest.index()); if (indexAbstraction == null) { return; } @@ -365,7 +370,6 @@ static void prohibitAppendWritesInBackingIndices(DocWriteRequest writeRequest // so checking if write op is append-only and if so fail. // (Updates and deletes are allowed to target a backing index) - DocWriteRequest.OpType opType = writeRequest.opType(); // CREATE op_type is considered append-only and // INDEX op_type is considered append-only when no if_primary_term and if_seq_no is specified. // (the latter maybe an update, but at this stage we can't determine that. In order to determine @@ -524,10 +528,8 @@ protected void doRun() { throw new IllegalArgumentException("only write ops with an op_type of create are allowed in data streams"); } - if (docWriteRequest.opType() == OpType.CREATE || docWriteRequest.opType() == OpType.INDEX) { - prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); - prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); - } + prohibitCustomRoutingOnDataStream(docWriteRequest, metadata); + prohibitAppendWritesInBackingIndices(docWriteRequest, metadata); docWriteRequest.routing(metadata.resolveWriteIndexRouting(docWriteRequest.routing(), docWriteRequest.index())); docWriteRequest.process(); From c67b47078970319ea9b7540c1fecbf6c1656e375 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 10:49:41 +0100 Subject: [PATCH 45/88] Reuse MappingMetadata instances in Metadata class. (#80348) Hash the mapping source of a MappingMetadata instance and then cache it in the Metadata class. A mapping with the same hash will reuse a cached MappingMetadata instance. This can significantly reduce the number of MappingMetadata instances for data streams and index patterns. The idea originated from #69772, but this change focuses only on the JVM heap memory savings and hashes the mapping instead of assigning it a UUID.
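Before the diff, a compact, self-contained sketch of the caching idea described above, mirroring the dedupe helpers that appear in the Metadata.Builder changes below; the record types here are toy stand-ins for IndexMetadata and MappingMetadata, not the real classes.

import java.util.HashMap;
import java.util.Map;

final class MappingDedupSketch {
    // Toy stand-ins: a mapping identified by the SHA-256 of its compressed source,
    // and index metadata that can be rewired to share a mapping instance.
    record Mapping(String sha256, String source) {}
    record IndexMeta(String name, Mapping mapping) {
        IndexMeta withMapping(Mapping shared) {
            return new IndexMeta(name, shared);
        }
    }

    private final Map<String, Mapping> mappingsByHash = new HashMap<>();

    IndexMeta dedupe(IndexMeta index) {
        if (index.mapping() == null) {
            return index;
        }
        // The first occurrence of a hash keeps its own instance; later occurrences
        // share the cached one, so identical mappings exist once on the heap.
        Mapping cached = mappingsByHash.putIfAbsent(index.mapping().sha256(), index.mapping());
        return cached == null ? index : index.withMapping(cached);
    }
}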
Relates to #77466 --- .../cluster/metadata/IndexMetadata.java | 39 +++++ .../cluster/metadata/MappingMetadata.java | 4 + .../cluster/metadata/Metadata.java | 84 ++++++++++- .../common/compress/CompressedXContent.java | 137 +++++++++--------- .../cluster/metadata/MetadataTests.java | 113 +++++++++++++++ .../DeflateCompressedXContentTests.java | 10 +- 6 files changed, 304 insertions(+), 83 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 853ff0bf5c0de..409fe6b934085 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -561,6 +561,45 @@ private IndexMetadata( assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards; } + IndexMetadata withMappingMetadata(MappingMetadata mapping) { + ImmutableOpenMap.Builder mappingBuilder = ImmutableOpenMap.builder(); + mappingBuilder.put(MapperService.SINGLE_MAPPING_NAME, mapping); + + return new IndexMetadata( + this.index, + this.version, + this.mappingVersion, + this.settingsVersion, + this.aliasesVersion, + this.primaryTerms, + this.state, + this.numberOfShards, + this.numberOfReplicas, + this.settings, + mappingBuilder.build(), + this.aliases, + this.customData, + this.inSyncAllocationIds, + this.requireFilters, + this.initialRecoveryFilters, + this.includeFilters, + this.excludeFilters, + this.indexCreatedVersion, + this.routingNumShards, + this.routingPartitionSize, + this.routingPaths, + this.waitForActiveShards, + this.rolloverInfos, + this.isSystem, + this.isHidden, + this.timestampRange, + this.priority, + this.creationDate, + this.ignoreDiskWatermarks, + this.tierPreference + ); + } + public Index getIndex() { return index; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java index 3fd51a03aa08c..b42c5bff0eb82 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java @@ -143,6 +143,10 @@ public boolean routingRequired() { return this.routingRequired; } + public String getSha256() { + return source.getSha256(); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(type()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 37cf925aef029..531f85d981827 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -208,6 +208,7 @@ public interface NonRestorableCustom extends Custom {} private final String[] visibleClosedIndices; private SortedMap indicesLookup; + private final Map mappingsByHash; private Metadata( String clusterUUID, @@ -229,7 +230,8 @@ private Metadata( String[] visibleOpenIndices, String[] allClosedIndices, String[] visibleClosedIndices, - SortedMap indicesLookup + SortedMap indicesLookup, + Map mappingsByHash ) { this.clusterUUID = clusterUUID; this.clusterUUIDCommitted = clusterUUIDCommitted; @@ -251,6 +253,7 @@ private Metadata( this.allClosedIndices = allClosedIndices; this.visibleClosedIndices = visibleClosedIndices; this.indicesLookup = 
indicesLookup; + this.mappingsByHash = mappingsByHash; } public Metadata withIncrementedVersion() { @@ -274,7 +277,8 @@ public Metadata withIncrementedVersion() { visibleOpenIndices, allClosedIndices, visibleClosedIndices, - indicesLookup + indicesLookup, + mappingsByHash ); } @@ -927,6 +931,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + Map getMappingsByHash() { + return mappingsByHash; + } + private static class MetadataDiff implements Diff { private final long version; @@ -1081,6 +1089,7 @@ public static class Builder { private final ImmutableOpenMap.Builder customs; private SortedMap previousIndicesLookup; + private final Map mappingsByHash; public Builder() { clusterUUID = UNKNOWN_CLUSTER_UUID; @@ -1089,6 +1098,7 @@ public Builder() { customs = ImmutableOpenMap.builder(); indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize previousIndicesLookup = null; + mappingsByHash = new HashMap<>(); } Builder(Metadata metadata) { @@ -1103,11 +1113,13 @@ public Builder() { this.templates = ImmutableOpenMap.builder(metadata.templates); this.customs = ImmutableOpenMap.builder(metadata.customs); previousIndicesLookup = metadata.getIndicesLookup(); + this.mappingsByHash = new HashMap<>(metadata.mappingsByHash); } public Builder put(IndexMetadata.Builder indexMetadataBuilder) { // we know its a new one, increment the version and store indexMetadataBuilder.version(indexMetadataBuilder.version() + 1); + dedupeMapping(indexMetadataBuilder); IndexMetadata indexMetadata = indexMetadataBuilder.build(); IndexMetadata previous = indices.put(indexMetadata.getIndex().getName(), indexMetadata); if (unsetPreviousIndicesLookup(previous, indexMetadata)) { @@ -1120,6 +1132,7 @@ public Builder put(IndexMetadata indexMetadata, boolean incrementVersion) { if (indices.get(indexMetadata.getIndex().getName()) == indexMetadata) { return this; } + indexMetadata = dedupeMapping(indexMetadata); // if we put a new index metadata, increment its version if (incrementVersion) { indexMetadata = IndexMetadata.builder(indexMetadata).version(indexMetadata.getVersion() + 1).build(); @@ -1186,13 +1199,16 @@ public Builder removeAllIndices() { previousIndicesLookup = null; indices.clear(); + mappingsByHash.clear(); return this; } public Builder indices(ImmutableOpenMap indices) { previousIndicesLookup = null; - this.indices.putAll(indices); + for (var cursor : indices) { + put(cursor.value, false); + } return this; } @@ -1637,6 +1653,8 @@ public Metadata build(boolean builtIndicesLookupEagerly) { } } + purgeUnusedEntries(indices); + // build all concrete indices arrays: // TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices. // When doing an operation across all indices, most of the time is spent on actually going to all shards and @@ -1677,7 +1695,8 @@ public Metadata build(boolean builtIndicesLookupEagerly) { visibleOpenIndicesArray, allClosedIndicesArray, visibleClosedIndicesArray, - indicesLookup + indicesLookup, + Collections.unmodifiableMap(mappingsByHash) ); } @@ -1896,6 +1915,63 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return builder.build(); } + + /** + * Dedupes {@link MappingMetadata} instance from the provided indexMetadata parameter using the sha256 + * hash from the compressed source of the mapping. 
If there is a mapping with the same sha256 hash then + * a new {@link IndexMetadata} is returned with the found {@link MappingMetadata} instance, otherwise + * the {@link MappingMetadata} instance of the indexMetadata parameter is recorded and the indexMetadata + * parameter is then returned. + */ + private IndexMetadata dedupeMapping(IndexMetadata indexMetadata) { + if (indexMetadata.mapping() == null) { + return indexMetadata; + } + + String digest = indexMetadata.mapping().getSha256(); + MappingMetadata entry = mappingsByHash.get(digest); + if (entry != null) { + return indexMetadata.withMappingMetadata(entry); + } else { + mappingsByHash.put(digest, indexMetadata.mapping()); + return indexMetadata; + } + } + + /** + * Similar to {@link #dedupeMapping(IndexMetadata)}. + */ + private void dedupeMapping(IndexMetadata.Builder indexMetadataBuilder) { + if (indexMetadataBuilder.mapping() == null) { + return; + } + + String digest = indexMetadataBuilder.mapping().getSha256(); + MappingMetadata entry = mappingsByHash.get(digest); + if (entry != null) { + indexMetadataBuilder.putMapping(entry); + } else { + mappingsByHash.put(digest, indexMetadataBuilder.mapping()); + } + } + + private void purgeUnusedEntries(ImmutableOpenMap indices) { + final Set sha256HashesInUse = new HashSet<>(mappingsByHash.size()); + for (var im : indices.values()) { + if (im.mapping() != null) { + sha256HashesInUse.add(im.mapping().getSha256()); + } + } + + final var iterator = mappingsByHash.entrySet().iterator(); + while (iterator.hasNext()) { + final var cacheKey = iterator.next().getKey(); + if (sha256HashesInUse.contains(cacheKey) == false) { + iterator.remove(); + } + } + } + } private static final ToXContent.Params FORMAT_PARAMS; diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index 1f99090ae813d..d43c78792938a 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,8 +9,10 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.hash.MessageDigests; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -24,9 +26,10 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.Arrays; +import java.security.DigestOutputStream; +import java.security.MessageDigest; +import java.util.Base64; import java.util.zip.CRC32; -import java.util.zip.CheckedOutputStream; import java.util.zip.DataFormatException; import java.util.zip.Inflater; @@ -38,46 +41,45 @@ */ public final class CompressedXContent { - private static final ThreadLocal inflater1 = ThreadLocal.withInitial(InflaterAndBuffer::new); - private static final ThreadLocal inflater2 = ThreadLocal.withInitial(InflaterAndBuffer::new); + private static final ThreadLocal inflater = ThreadLocal.withInitial(InflaterAndBuffer::new); - private static int crc32(BytesReference data) { - CRC32 crc32 = new CRC32(); + private static String sha256(BytesReference data) { + MessageDigest messageDigest = MessageDigests.sha256(); try { - data.writeTo(new 
CheckedOutputStream(Streams.NULL_OUTPUT_STREAM, crc32)); + data.writeTo(new DigestOutputStream(Streams.NULL_OUTPUT_STREAM, messageDigest)); } catch (IOException bogus) { // cannot happen throw new Error(bogus); } - return (int) crc32.getValue(); + return Base64.getEncoder().encodeToString(messageDigest.digest()); } - private static int crc32FromCompressed(byte[] compressed) { - CRC32 crc32 = new CRC32(); - try (InflaterAndBuffer inflaterAndBuffer = inflater1.get()) { + private static String sha256FromCompressed(byte[] compressed) { + MessageDigest messageDigest = MessageDigests.sha256(); + try (InflaterAndBuffer inflaterAndBuffer = inflater.get()) { final Inflater inflater = inflaterAndBuffer.inflater; final ByteBuffer buffer = inflaterAndBuffer.buffer; assert assertBufferIsCleared(buffer); setInflaterInput(compressed, inflater); do { if (inflater.inflate(buffer) > 0) { - crc32.update(buffer.flip()); + messageDigest.update(buffer.flip()); } buffer.clear(); } while (inflater.finished() == false); - return (int) crc32.getValue(); + return Base64.getEncoder().encodeToString(messageDigest.digest()); } catch (DataFormatException e) { throw new ElasticsearchException(e); } } private final byte[] bytes; - private final int crc32; + private final String sha256; // Used for serialization - private CompressedXContent(byte[] compressed, int crc32) { + private CompressedXContent(byte[] compressed, String sha256) { this.bytes = compressed; - this.crc32 = crc32; + this.sha256 = sha256; assertConsistent(); } @@ -90,8 +92,8 @@ public CompressedXContent(ToXContent xcontent) throws IOException { */ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws IOException { BytesStreamOutput bStream = new BytesStreamOutput(); - CRC32 crc32 = new CRC32(); - OutputStream checkedStream = new CheckedOutputStream(CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream), crc32); + MessageDigest messageDigest = MessageDigests.sha256(); + OutputStream checkedStream = new DigestOutputStream(CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream), messageDigest); try (XContentBuilder builder = XContentFactory.jsonBuilder(checkedStream)) { if (xcontent.isFragment()) { builder.startObject(); @@ -102,7 +104,7 @@ public CompressedXContent(ToXContent xcontent, ToXContent.Params params) throws } } this.bytes = BytesReference.toBytes(bStream.bytes()); - this.crc32 = (int) crc32.getValue(); + this.sha256 = Base64.getEncoder().encodeToString(messageDigest.digest()); assertConsistent(); } @@ -115,18 +117,18 @@ public CompressedXContent(BytesReference data) throws IOException { if (compressor != null) { // already compressed... 
this.bytes = BytesReference.toBytes(data); - this.crc32 = crc32FromCompressed(this.bytes); + this.sha256 = sha256FromCompressed(this.bytes); } else { this.bytes = BytesReference.toBytes(CompressorFactory.COMPRESSOR.compress(data)); - this.crc32 = crc32(data); + this.sha256 = sha256(data); } assertConsistent(); } private void assertConsistent() { assert CompressorFactory.compressor(new BytesArray(bytes)) != null; - assert this.crc32 == crc32(uncompressed()); - assert this.crc32 == crc32FromCompressed(bytes); + assert this.sha256.equals(sha256(uncompressed())); + assert this.sha256.equals(sha256FromCompressed(bytes)); } public CompressedXContent(byte[] data) throws IOException { @@ -160,13 +162,31 @@ public String string() { return uncompressed().utf8ToString(); } + public String getSha256() { + return sha256; + } + public static CompressedXContent readCompressedString(StreamInput in) throws IOException { - int crc32 = in.readInt(); - return new CompressedXContent(in.readByteArray(), crc32); + final String sha256; + final byte[] compressedData; + if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + sha256 = in.readString(); + compressedData = in.readByteArray(); + } else { + int crc32 = in.readInt(); + compressedData = in.readByteArray(); + sha256 = sha256FromCompressed(compressedData); + } + return new CompressedXContent(compressedData, sha256); } public void writeTo(StreamOutput out) throws IOException { - out.writeInt(crc32); + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + out.writeString(sha256); + } else { + int crc32 = crc32FromCompressed(bytes); + out.writeInt(crc32); + } out.writeByteArray(bytes); } @@ -176,54 +196,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; CompressedXContent that = (CompressedXContent) o; - - if (crc32 != that.crc32) { - return false; - } - - if (Arrays.equals(bytes, that.bytes)) { - return true; - } - // compression is not entirely deterministic in all cases depending on hwo the compressed bytes were assembled, check uncompressed - // equality - return equalsWhenUncompressed(bytes, that.bytes); - } - - // package private for testing - static boolean equalsWhenUncompressed(byte[] compressed1, byte[] compressed2) { - try (InflaterAndBuffer inflaterAndBuffer1 = inflater1.get(); InflaterAndBuffer inflaterAndBuffer2 = inflater2.get()) { - final Inflater inf1 = inflaterAndBuffer1.inflater; - final Inflater inf2 = inflaterAndBuffer2.inflater; - setInflaterInput(compressed1, inf1); - setInflaterInput(compressed2, inf2); - final ByteBuffer buf1 = inflaterAndBuffer1.buffer; - assert assertBufferIsCleared(buf1); - final ByteBuffer buf2 = inflaterAndBuffer2.buffer; - assert assertBufferIsCleared(buf2); - while (true) { - while (inf1.inflate(buf1) > 0 && buf1.hasRemaining()) - ; - while (inf2.inflate(buf2) > 0 && buf2.hasRemaining()) - ; - if (buf1.flip().equals(buf2.flip()) == false) { - return false; - } - if (inf1.finished()) { - // if the first inflater is done but the second one still has data we fail here, if it's the other way around we fail - // on the next round because we will only read bytes into 2 - return inf2.finished(); - } - buf1.clear(); - buf2.clear(); - } - } catch (DataFormatException e) { - throw new ElasticsearchException(e); - } + return sha256.equals(that.sha256); } @Override public int hashCode() { - return crc32; + return sha256.hashCode(); } @Override @@ -231,6 +209,25 @@ public String toString() { return string(); } + private static int crc32FromCompressed(byte[] compressed) { + CRC32 
crc32 = new CRC32(); + try (InflaterAndBuffer inflaterAndBuffer = inflater.get()) { + final Inflater inflater = inflaterAndBuffer.inflater; + final ByteBuffer buffer = inflaterAndBuffer.buffer; + assert assertBufferIsCleared(buffer); + setInflaterInput(compressed, inflater); + do { + if (inflater.inflate(buffer) > 0) { + crc32.update(buffer.flip()); + } + buffer.clear(); + } while (inflater.finished() == false); + return (int) crc32.getValue(); + } catch (DataFormatException e) { + throw new ElasticsearchException(e); + } + } + /** * Set the given bytes as inflater input, accounting for the fact that they start with our header of size * {@link DeflateCompressor#HEADER_SIZE}. diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 49225f58059b2..7061d9556b82c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.alias.RandomAliasActionsGenerator; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; @@ -60,6 +62,7 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -1980,6 +1983,116 @@ private IndexMetadata buildIndexWithAlias( .build(); } + public void testMappingDuplication() { + final Set randomMappingDefinitions; + { + int numEntries = randomIntBetween(4, 8); + randomMappingDefinitions = new HashSet<>(numEntries); + for (int i = 0; i < numEntries; i++) { + Map mapping = RandomAliasActionsGenerator.randomMap(2); + String mappingAsString = Strings.toString((builder, params) -> builder.mapContents(mapping)); + randomMappingDefinitions.add(mappingAsString); + } + } + + Metadata metadata; + int numIndices = randomIntBetween(16, 32); + { + String[] definitions = randomMappingDefinitions.toArray(String[]::new); + Metadata.Builder mb = new Metadata.Builder(); + for (int i = 0; i < numIndices; i++) { + IndexMetadata.Builder indexBuilder = IndexMetadata.builder("index-" + i) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(definitions[i % randomMappingDefinitions.size()]) + .numberOfShards(1) + .numberOfReplicas(0); + if (randomBoolean()) { + mb.put(indexBuilder); + } else { + mb.put(indexBuilder.build(), true); + } + } + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat( + metadata.indices().stream().map(entry -> entry.getValue().mapping()).collect(Collectors.toSet()), + hasSize(metadata.getMappingsByHash().size()) + ); + + // Add a new index with a new index with known mapping: + MappingMetadata mapping = metadata.indices().get("index-" + randomInt(numIndices - 1)).mapping(); + MappingMetadata entry = metadata.getMappingsByHash().get(mapping.getSha256()); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put( + IndexMetadata.builder("index-" + 
numIndices) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(mapping) + .numberOfShards(1) + .numberOfReplicas(0) + ); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(mapping.getSha256()), equalTo(entry)); + + // Remove index and ensure mapping cache stays the same + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove("index-" + numIndices); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(mapping.getSha256()), equalTo(entry)); + + // Update a mapping of an index: + IndexMetadata luckyIndex = metadata.index("index-" + randomInt(numIndices - 1)); + entry = metadata.getMappingsByHash().get(luckyIndex.mapping().getSha256()); + MappingMetadata updatedMapping = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Map.of("mapping", "updated")); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put(IndexMetadata.builder(luckyIndex).putMapping(updatedMapping)); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size() + 1)); + assertThat(metadata.getMappingsByHash().get(luckyIndex.mapping().getSha256()), equalTo(entry)); + assertThat(metadata.getMappingsByHash().get(updatedMapping.getSha256()), equalTo(updatedMapping)); + + // Remove the index with updated mapping + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove(luckyIndex.getIndex().getName()); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(updatedMapping.getSha256()), nullValue()); + + // Add an index with new mapping and then later remove it: + MappingMetadata newMapping = new MappingMetadata(MapperService.SINGLE_MAPPING_NAME, Map.of("new", "mapping")); + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.put( + IndexMetadata.builder("index-" + numIndices) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)) + .putMapping(newMapping) + .numberOfShards(1) + .numberOfReplicas(0) + ); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size() + 1)); + assertThat(metadata.getMappingsByHash().get(newMapping.getSha256()), equalTo(newMapping)); + + { + Metadata.Builder mb = new Metadata.Builder(metadata); + mb.remove("index-" + numIndices); + metadata = mb.build(); + } + assertThat(metadata.getMappingsByHash().size(), equalTo(randomMappingDefinitions.size())); + assertThat(metadata.getMappingsByHash().get(newMapping.getSha256()), nullValue()); + } + public static Metadata randomMetadata() { return randomMetadata(1); } diff --git a/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java b/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java index 23dc9a601e8ab..a0b295117812e 100644 --- a/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java +++ b/server/src/test/java/org/elasticsearch/common/compress/DeflateCompressedXContentTests.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Random; @@ 
-134,13 +133,6 @@ public void testEqualsWhenUncompressed() throws IOException { final CompressedXContent two = new CompressedXContent( (builder, params) -> builder.stringListField("arr", Arrays.asList(randomJSON2)) ); - assertFalse(CompressedXContent.equalsWhenUncompressed(one.compressed(), two.compressed())); - } - - public void testEqualsCrcCollision() throws IOException { - final CompressedXContent content1 = new CompressedXContent("{\"d\":\"68&A<\"}".getBytes(StandardCharsets.UTF_8)); - final CompressedXContent content2 = new CompressedXContent("{\"d\":\"gZG- \"}".getBytes(StandardCharsets.UTF_8)); - assertEquals(content1.hashCode(), content2.hashCode()); // the inputs are a known CRC32 collision - assertNotEquals(content1, content2); + assertNotEquals(one.uncompressed(), two.uncompressed()); } } From 73e71009b096ed3e123a334f567712fea99558dd Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 12:33:43 +0000 Subject: [PATCH 46/88] Fix split package org.apache.lucene.search.vectorhighlight (#81041) This PR moves the CustomFieldQuery class from org.apache.lucene.search.vectorhighlight to org.elasticsearch.lucene.search.vectorhighlight, thus avoiding the split package with lucene. It would appear that when CustomFieldQuery was originally conceived, it needed package-private access to its superclass, FieldQuery, but this is no longer the case (the superclass now exposes the necessary members publicly). --- server/build.gradle | 1 - .../lucene/search/vectorhighlight/CustomFieldQuery.java | 4 +++- .../fetch/subphase/highlight/FastVectorHighlighter.java | 2 +- .../org/elasticsearch/deps/lucene/VectorHighlighterTests.java | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) rename server/src/main/java/org/{apache => elasticsearch}/lucene/search/vectorhighlight/CustomFieldQuery.java (97%) diff --git a/server/build.gradle b/server/build.gradle index 9a87155eb86c1..781ab3c31958d 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -259,7 +259,6 @@ tasks.named('splitPackagesAudit').configure { ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery', 'org.apache.lucene.queries.BlendedTermQuery', 'org.apache.lucene.queries.SpanMatchNoDocsQuery', - 'org.apache.lucene.search.vectorhighlight.CustomFieldQuery', // These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 diff --git a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java similarity index 97% rename from server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java rename to server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java index 3a3439a326d30..5e90871e4689a 100644 --- a/server/src/main/java/org/apache/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.apache.lucene.search.vectorhighlight; +package org.elasticsearch.lucene.search.vectorhighlight; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; @@ -20,6 +20,8 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.SynonymQuery; import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; +import org.apache.lucene.search.vectorhighlight.FieldQuery; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java index 536909140271c..01c6a60552b70 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.vectorhighlight.BaseFragmentsBuilder; import org.apache.lucene.search.vectorhighlight.BoundaryScanner; import org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScanner; -import org.apache.lucene.search.vectorhighlight.CustomFieldQuery; import org.apache.lucene.search.vectorhighlight.FieldFragList; import org.apache.lucene.search.vectorhighlight.FieldQuery; import org.apache.lucene.search.vectorhighlight.FragListBuilder; @@ -27,6 +26,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.Field; import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.FieldOptions; diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index dac759607954b..4992b2e87e0d1 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -22,11 +22,11 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.search.vectorhighlight.CustomFieldQuery; import org.apache.lucene.search.vectorhighlight.FastVectorHighlighter; import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; From 60b3ca674f84b248037ce04ccae00072753db525 Mon Sep 17 00:00:00 2001 From: Artem Prigoda Date: Thu, 25 Nov 2021 13:43:50 +0100 Subject: [PATCH 47/88] Add support for directly iterating over arrays (#80469) Avoid creating a stream and a spliterator just for a simple iteration over an array.
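For illustration, a minimal sketch of the pattern this change applies across the affected response classes. The holder class below is hypothetical and invented for this example; the two iterator variants are exactly the ones swapped in the diff:

    import java.util.Iterator;

    import org.elasticsearch.common.collect.Iterators;

    // Hypothetical example class; the real change touches response classes
    // such as BulkResponse, MultiSearchResponse and SearchHits.
    class ItemsHolder implements Iterable<String> {
        private final String[] items;

        ItemsHolder(String... items) {
            this.items = items;
        }

        @Override
        public Iterator<String> iterator() {
            // Before: Arrays.stream(items).iterator() allocated a Stream and a
            // Spliterator just to walk the array once.
            // After: a single, lightweight array-backed iterator.
            return Iterators.forArray(items);
        }
    }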
Co-authored-by: David Turner --- .../mustache/MultiSearchTemplateResponse.java | 4 +- .../indices/segments/IndexShardSegments.java | 4 +- .../admin/indices/stats/IndexShardStats.java | 4 +- .../action/bulk/BulkResponse.java | 4 +- .../action/get/MultiGetResponse.java | 4 +- .../action/search/MultiSearchResponse.java | 4 +- .../termvectors/MultiTermVectorsResponse.java | 4 +- .../common/collect/Iterators.java | 27 +++++++++++ .../org/elasticsearch/monitor/fs/FsInfo.java | 4 +- .../elasticsearch/monitor/jvm/JvmStats.java | 4 +- .../org/elasticsearch/search/SearchHits.java | 3 +- .../common/collect/IteratorsTests.java | 47 +++++++++++++++++++ 12 files changed, 94 insertions(+), 19 deletions(-) diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index 3dff5b0f4a853..86cba18da06c1 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -24,7 +25,6 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class MultiSearchTemplateResponse extends ActionResponse implements Iterable, ToXContentObject { @@ -115,7 +115,7 @@ public String toString() { @Override public Iterator iterator() { - return Arrays.stream(items).iterator(); + return Iterators.forArray(items); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java index afe0e4855f065..16ce3bb078ade 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndexShardSegments.java @@ -8,9 +8,9 @@ package org.elasticsearch.action.admin.indices.segments; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.index.shard.ShardId; -import java.util.Arrays; import java.util.Iterator; public class IndexShardSegments implements Iterable { @@ -38,6 +38,6 @@ public ShardSegments[] getShards() { @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return Iterators.forArray(shards); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java index 5b52a7209ef71..18a9c9d1ba27a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndexShardStats.java @@ -8,13 +8,13 @@ package org.elasticsearch.action.admin.indices.stats; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class IndexShardStats implements Iterable, Writeable { @@ -47,7 +47,7 @@ public ShardStats getAt(int position) { @Override public Iterator iterator() { - return Arrays.stream(shards).iterator(); + return Iterators.forArray(shards); } private CommonStats total = null; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 258d91c712ba7..ae3c53ca5b8c8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; @@ -19,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -124,7 +124,7 @@ public BulkItemResponse[] getItems() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index 57158194416b9..eb979bc578554 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -130,7 +130,7 @@ public MultiGetItemResponse[] getResponses() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java index c40e622468759..041fdfeca76eb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java @@ -12,6 +12,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentParser.Token; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -126,7 +126,7 @@ public MultiSearchResponse(Item[] items, long tookInMillis) { 
@Override public Iterator iterator() { - return Arrays.stream(items).iterator(); + return Iterators.forArray(items); } /** diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 52f389d7f4e19..bfa07fa55d3f9 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -20,7 +21,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Arrays; import java.util.Iterator; public class MultiTermVectorsResponse extends ActionResponse implements Iterable, ToXContentObject { @@ -102,7 +102,7 @@ public MultiTermVectorsItemResponse[] getResponses() { @Override public Iterator iterator() { - return Arrays.stream(responses).iterator(); + return Iterators.forArray(responses); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 94ebb0261b270..a2629ffd0556c 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -83,4 +83,31 @@ public T next() { return iterators[index].next(); } } + + public static Iterator forArray(T[] array) { + return new ArrayIterator<>(array); + } + + private static final class ArrayIterator implements Iterator { + + private final T[] array; + private int index; + + private ArrayIterator(T[] array) { + this.array = Objects.requireNonNull(array, "Unable to iterate over a null array"); + } + + @Override + public boolean hasNext() { + return index < array.length; + } + + @Override + public T next() { + if (index >= array.length) { + throw new NoSuchElementException(); + } + return array[index++]; + } + } } diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java index 2054063e98a88..1b7ba960316a3 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsInfo.java @@ -8,6 +8,7 @@ package org.elasticsearch.monitor.fs; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -18,7 +19,6 @@ import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; -import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.Set; @@ -495,7 +495,7 @@ public IoStats getIoStats() { @Override public Iterator iterator() { - return Arrays.stream(paths).iterator(); + return Iterators.forArray(paths); } @Override diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index 474ed7382c12a..04901d89c005a 100644 --- 
a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -8,6 +8,7 @@ package org.elasticsearch.monitor.jvm; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,6 @@ import java.lang.management.RuntimeMXBean; import java.lang.management.ThreadMXBean; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -346,7 +346,7 @@ public GarbageCollector[] getCollectors() { @Override public Iterator iterator() { - return Arrays.stream(collectors).iterator(); + return Iterators.forArray(collectors); } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHits.java b/server/src/main/java/org/elasticsearch/search/SearchHits.java index fc9aa68a0f1f5..e5db8b81bbe61 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHits.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHits.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -170,7 +171,7 @@ public Object[] getCollapseValues() { @Override public Iterator iterator() { - return Arrays.stream(getHits()).iterator(); + return Iterators.forArray(getHits()); } public static final class Fields { diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java index 2b77d4b6a4005..3a750419c2090 100644 --- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.common.collect; +import org.elasticsearch.common.Randomness; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -16,6 +17,7 @@ import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; +import java.util.concurrent.atomic.AtomicInteger; public class IteratorsTests extends ESTestCase { public void testConcatentation() { @@ -110,6 +112,51 @@ public void testNullIterator() { } } + public void testArrayIterator() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + int i = 0; + while (iterator.hasNext()) { + assertEquals(array[i++], iterator.next()); + } + assertEquals(array.length, i); + } + + public void testArrayIteratorForEachRemaining() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + AtomicInteger index = new AtomicInteger(); + iterator.forEachRemaining(i -> assertEquals(array[index.getAndIncrement()], i)); + assertEquals(array.length, index.get()); + } + + public void testArrayIteratorIsUnmodifiable() { + Integer[] array = randomIntegerArray(); + Iterator iterator = Iterators.forArray(array); + + expectThrows(UnsupportedOperationException.class, iterator::remove); + } + + public void testArrayIteratorThrowsNoSuchElementExceptionWhenDepleted() { + Integer[] array = randomIntegerArray(); + Iterator iterator = 
Iterators.forArray(array); + for (int i = 0; i < array.length; i++) { + iterator.next(); + } + + expectThrows(NoSuchElementException.class, iterator::next); + } + + public void testArrayIteratorOnNull() { + expectThrows(NullPointerException.class, "Unable to iterate over a null array", () -> Iterators.forArray(null)); + } + + private static Integer[] randomIntegerArray() { + return Randomness.get().ints(randomIntBetween(0, 1000)).boxed().toArray(Integer[]::new); + } + private Iterator singletonIterator(T value) { return Collections.singleton(value).iterator(); } From a139aff04726d6b9ffb3562d165a6ad129b6eab9 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Thu, 25 Nov 2021 13:55:41 +0100 Subject: [PATCH 48/88] Disable bwc tests in order to backport #80348 (#81046) --- build.gradle | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build.gradle b/build.gradle index 59fc13e9e1ac8..9d5ab644667cc 100644 --- a/build.gradle +++ b/build.gradle @@ -131,9 +131,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = true +boolean bwc_tests_enabled = false // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "" +String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/80348" /* * FIPS 140-2 behavior was fixed in 7.11.0. Before that there is no way to run elasticsearch in a * JVM that is properly configured to be in fips mode with BCFIPS. For now we need to disable From 15de6035148fd7904bb4f609f10d9b29af492a4f Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 13:34:56 +0000 Subject: [PATCH 49/88] Fix split package org.apache.lucene.queries (#81043) --- .../java/org/elasticsearch/percolator/QueryAnalyzer.java | 2 +- .../org/elasticsearch/percolator/CandidateQueryTests.java | 2 +- .../org/elasticsearch/percolator/QueryAnalyzerTests.java | 2 +- server/build.gradle | 6 +----- .../search/SpanBooleanQueryRewriteWithMaxClause.java | 2 +- .../java/org/elasticsearch/index/mapper/RangeType.java | 2 +- .../index/query/SpanMultiTermQueryBuilder.java | 2 +- .../elasticsearch/index/search/MultiMatchQueryParser.java | 2 +- .../lucene/queries/BinaryDocValuesRangeQuery.java | 2 +- .../lucene/queries/BlendedTermQuery.java | 2 +- .../lucene/queries/SpanMatchNoDocsQuery.java | 2 +- .../lucene/search/vectorhighlight/CustomFieldQuery.java | 2 +- .../mapper/RangeFieldQueryStringQueryBuilderTests.java | 2 +- .../elasticsearch/index/mapper/RangeFieldTypeTests.java | 2 +- .../index/query/QueryStringQueryBuilderTests.java | 2 +- .../index/query/SpanMultiTermQueryBuilderTests.java | 2 +- .../index/query/SpanNearQueryBuilderTests.java | 2 +- .../index/search/MultiMatchQueryParserTests.java | 2 +- .../BaseRandomBinaryDocValuesRangeQueryTestCase.java | 8 ++++---- .../lucene/queries/BinaryDocValuesRangeQueryTests.java | 8 ++++---- .../lucene/queries/BlendedTermQueryTests.java | 1 - .../lucene/queries/SpanMatchNoDocsQueryTests.java | 1 - .../histogram/DateRangeHistogramAggregatorTests.java | 2 +- 23 files changed, 27 insertions(+), 33 deletions(-) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/BinaryDocValuesRangeQuery.java (99%) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/BlendedTermQuery.java (99%) rename server/src/main/java/org/{apache => elasticsearch}/lucene/queries/SpanMatchNoDocsQuery.java (98%) diff --git 
a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 4fedaa9177cd0..f5b2e2ba54c11 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -9,7 +9,6 @@ import org.apache.lucene.index.PrefixCodedTerms; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.BooleanClause.Occur; @@ -30,6 +29,7 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.query.DateRangeIncludingNowQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.util.ArrayList; import java.util.Arrays; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 636d49d0139c7..06b5a0f0b53f5 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.memory.MemoryIndex; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanNotQuery; import org.apache.lucene.queries.spans.SpanOrQuery; @@ -83,6 +82,7 @@ import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.mapper.TestDocumentParserContext; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.VersionUtils; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index d30095466ad1e..b3b47d909b046 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.intervals.IntervalQuery; import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; @@ -49,6 +48,7 @@ import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.percolator.QueryAnalyzer.QueryExtraction; import org.elasticsearch.percolator.QueryAnalyzer.Result; import org.elasticsearch.test.ESTestCase; diff --git a/server/build.gradle b/server/build.gradle index 781ab3c31958d..1d6a1f4f7689a 100644 --- 
a/server/build.gradle +++ b/server/build.gradle @@ -256,13 +256,9 @@ tasks.named("licenseHeaders").configure { tasks.named('splitPackagesAudit').configure { // Lucene packages should be owned by Lucene! - ignoreClasses 'org.apache.lucene.queries.BinaryDocValuesRangeQuery', - 'org.apache.lucene.queries.BlendedTermQuery', - 'org.apache.lucene.queries.SpanMatchNoDocsQuery', - // These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 - 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', + ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', // cli is owned by the libs/cli, so these should be moved to o.e.server.cli 'org.elasticsearch.cli.CommandLoggingConfigurator', diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java b/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java index 18be37bff52e0..22b9d25044ab5 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/SpanBooleanQueryRewriteWithMaxClause.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; @@ -24,6 +23,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.AttributeSource; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import java.io.IOException; import java.util.Collection; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java index 092f555b50965..f8100e794dbd9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeType.java @@ -17,7 +17,6 @@ import org.apache.lucene.document.LongRange; import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -28,6 +27,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java index b2006dddfabdb..9ad3e462796dd 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.index.query; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; @@ 
-22,6 +21,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.support.QueryParsers; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java index e2d4fe399fe6a..ee17329f7767e 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQueryParser.java @@ -11,7 +11,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.DisjunctionMaxQuery; @@ -24,6 +23,7 @@ import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.io.IOException; import java.util.ArrayList; diff --git a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java similarity index 99% rename from server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java index 2ad15176c5592..f474b6f7c883b 100644 --- a/server/src/main/java/org/apache/lucene/queries/BinaryDocValuesRangeQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQuery.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.DocValues; diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java similarity index 99% rename from server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java index 6772838c95f49..d06d475503fad 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/BlendedTermQuery.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. 
*/ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; diff --git a/server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java b/server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java similarity index 98% rename from server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java rename to server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java index a609ec3d96479..b70e573c3b032 100644 --- a/server/src/main/java/org/apache/lucene/queries/SpanMatchNoDocsQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQuery.java @@ -5,7 +5,7 @@ * in compliance with, at your election, the Elastic License 2.0 or the Server * Side Public License, v 1. */ -package org.apache.lucene.queries; +package org.elasticsearch.lucene.queries; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; diff --git a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java index 5e90871e4689a..c0c0cbeef7199 100644 --- a/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java +++ b/server/src/main/java/org/elasticsearch/lucene/search/vectorhighlight/CustomFieldQuery.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.sandbox.search.CombinedFieldQuery; import org.apache.lucene.search.BoostQuery; @@ -25,6 +24,7 @@ import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.search.ESToParentBlockJoinQuery; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import java.io.IOException; import java.util.Collection; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 871bc1e829992..c63727b0dfa9b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.LongRange; import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -26,6 +25,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index 053897a9cf597..2428a9f21d65c 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.document.InetAddressRange; import org.apache.lucene.document.IntRange; import org.apache.lucene.document.LongRange; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -31,6 +30,7 @@ import org.elasticsearch.index.mapper.RangeFieldMapper.RangeFieldType; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.test.IndexSettingsModule; import org.junit.Before; diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index fd34193126a01..288bd6b2339d4 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -12,7 +12,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanTermQuery; @@ -49,6 +48,7 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.search.QueryStringQueryParser; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index ade08d877dde9..181b9c7dd3dc8 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.FieldMaskingSpanQuery; import org.apache.lucene.queries.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.queries.spans.SpanQuery; @@ -33,6 +32,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java index 2379c73c3b51a..93cfb4d3f17aa 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java @@ -8,12 +8,12 @@ package org.elasticsearch.index.query; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.queries.spans.SpanTermQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.lucene.queries.SpanMatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; diff --git a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java index 7e3d6351eb676..e72e7e1d7d723 100644 --- a/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/MultiMatchQueryParserTests.java @@ -10,7 +10,6 @@ import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; @@ -32,6 +31,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.MultiMatchQueryParser.FieldAndBoost; +import org.elasticsearch.lucene.queries.BlendedTermQuery; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.MockKeywordPlugin; diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java b/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java index ca85ebb717bb4..c036c6bb5799f 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BaseRandomBinaryDocValuesRangeQueryTestCase.java @@ -19,10 +19,10 @@ import java.util.Collections; import java.util.Objects; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; public abstract class BaseRandomBinaryDocValuesRangeQueryTestCase extends BaseRangeFieldQueryTestCase { diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java index 2066138bba5d4..bdd5e2c8becfb 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BinaryDocValuesRangeQueryTests.java @@ -22,10 +22,10 @@ import 
java.io.IOException; import static java.util.Collections.singleton; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; -import static org.apache.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CONTAINS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.CROSSES; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.INTERSECTS; +import static org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery.QueryType.WITHIN; public class BinaryDocValuesRangeQueryTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java index 41703db570644..1ab5c4e5d3fb2 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/BlendedTermQueryTests.java @@ -17,7 +17,6 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DisjunctionMaxQuery; diff --git a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java index dff9f8e5a093a..dc818e043fab5 100644 --- a/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java +++ b/server/src/test/java/org/elasticsearch/lucene/queries/SpanMatchNoDocsQueryTests.java @@ -16,7 +16,6 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; -import org.apache.lucene.queries.SpanMatchNoDocsQuery; import org.apache.lucene.queries.spans.SpanNearQuery; import org.apache.lucene.queries.spans.SpanOrQuery; import org.apache.lucene.queries.spans.SpanQuery; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java index 0b0e68e9f5fb2..24701b59956e8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateRangeHistogramAggregatorTests.java @@ -13,7 +13,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.queries.BinaryDocValuesRangeQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; @@ -26,6 +25,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.RangeFieldMapper; import org.elasticsearch.index.mapper.RangeType; +import org.elasticsearch.lucene.queries.BinaryDocValuesRangeQuery; import org.elasticsearch.search.aggregations.AggregatorTestCase; 
import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; From 64338bff3ef2fbea8b3bd153d92f79168c8cdd7a Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Nov 2021 13:36:58 +0000 Subject: [PATCH 50/88] [ML] In 8.x ML will have to tolerate model snapshots for 6.4.0+ (#81039) Previously we intended that 8.x ML would only accept model snapshots created in version 7.0.0 or above. However, due to a bug (elastic/ml-cpp#1545), it's not possible to distinguish between model snapshots created in versions 6.4.0-7.9.3 inclusive. Therefore, to be sure of meeting the stated policy of accepting model snapshots created in 7.0.0 or above, ML actually has to accept those labelled as 6.4.0 or above. Fixes #81011 --- .../xpack/ml/integration/AnomalyJobCRUDIT.java | 2 +- .../xpack/ml/action/TransportOpenJobAction.java | 9 +++++---- .../job/task/OpenJobPersistentTasksExecutor.java | 14 ++++++++++---- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java index a7e3238e77f8f..f9ac43ab57d2e 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/AnomalyJobCRUDIT.java @@ -197,7 +197,7 @@ public void testOpenJobWithOldSnapshot() { assertThat( ex.getMessage(), containsString( - "[open-job-with-old-model-snapshot] job snapshot [snap_1] has min version before [7.0.0], " + "[open-job-with-old-model-snapshot] job model snapshot [snap_1] has min version before [7.0.0], " + "please revert to a newer model snapshot or reset the job" ) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index f167d6534ea15..3cc30e813ccdd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -54,7 +54,8 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.checkAssignmentState; /* @@ -191,17 +192,17 @@ public void onFailure(Exception e) { return; } assert modelSnapshot.getPage().results().size() == 1; - if (modelSnapshot.getPage().results().get(0).getMinVersion().onOrAfter(MIN_SUPPORTED_SNAPSHOT_VERSION)) { + if (modelSnapshot.getPage().results().get(0).getMinVersion().onOrAfter(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { modelSnapshotValidationListener.onResponse(true); return; } listener.onFailure( ExceptionsHelper.badRequestException( - "[{}] job snapshot [{}] has min version before [{}], " + "[{}] job model snapshot [{}] has min version before [{}], " + "please revert to a newer model snapshot or reset the job", jobParams.getJobId(),
jobParams.getJob().getModelSnapshotId(), - MIN_SUPPORTED_SNAPSHOT_VERSION.toString() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION.toString() ) ); }, failure -> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 222dd37917492..6c8ea314dd153 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -74,7 +74,12 @@ public class OpenJobPersistentTasksExecutor extends AbstractJobPersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); - public static final Version MIN_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; + // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible + // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. + public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0"); + // We tell the user we support model snapshots newer than 7.0.0 as that's the major version + // boundary, even though behind the scenes we have to support back to 6.4.0. + public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; // Resuming a job with a running datafeed from its current snapshot was added in 7.11 and // can only be done if the master node is on or after that version. @@ -425,16 +430,17 @@ private void verifyCurrentSnapshotVersion(String jobId, ActionListener } assert snapshot.getPage().results().size() == 1; ModelSnapshot snapshotObj = snapshot.getPage().results().get(0); - if (snapshotObj.getMinVersion().onOrAfter(MIN_SUPPORTED_SNAPSHOT_VERSION)) { + if (snapshotObj.getMinVersion().onOrAfter(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { listener.onResponse(true); return; } listener.onFailure( ExceptionsHelper.badRequestException( - "[{}] job snapshot [{}] has min version before [{}], please revert to a newer model snapshot or reset the job", + "[{}] job model snapshot [{}] has min version before [{}], " + + "please revert to a newer model snapshot or reset the job", jobId, jobSnapshotId, - MIN_SUPPORTED_SNAPSHOT_VERSION.toString() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION.toString() ) ); }, snapshotFailure -> { From 8ee05f9d9d57894d60802f9dcffd1a488962fa96 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 25 Nov 2021 15:26:30 +0100 Subject: [PATCH 51/88] Fix after restore Lucene.pruneUnreferencedFiles() conditional (#81047) In #68821 we introduced a condition to skip the pruning of unreferenced files after the restore of a snapshot for searchable snapshot shards. Sadly I managed to mess this up in a refactoring (#75308) a few months later. This commit reintroduces the right conditional, which is to NOT prune Lucene files for searchable snapshot shards.
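For reference, a minimal sketch of the shape of the restored check in FileRestoreContext. The accessor names here are assumptions based on the description above, not copied from the one-line diff that follows:

    // Sketch only, with assumed accessor names: searchable snapshot shards read
    // their data directly from the repository, so the restore must NOT prune
    // their local Lucene files.
    if (store.indexSettings().getIndexMetadata().isSearchableSnapshot() == false) {
        Lucene.pruneUnreferencedFiles(restoredSegmentsFile.name(), store.directory());
    }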
--- .../repositories/blobstore/FileRestoreContext.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index dad6f296512f1..468f5c1e72374 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -175,7 +175,7 @@ public void restore(SnapshotFiles snapshotFiles, Store store, ActionListener Date: Thu, 25 Nov 2021 14:42:22 +0000 Subject: [PATCH 52/88] Fix split package between libs/cli and server, move org.elasticsearch.cli to org.elasticsearch.common.cli (#81038) Fix the split package org.elasticsearch.cli, between server and the cli library. Move the server org.elasticsearch.cli package to org.elasticsearch.common.cli. Removing split packages is a prerequisite to modularization. --- .../cli/keystore/BaseKeyStoreCommand.java | 2 +- .../cli/keystore/CreateKeyStoreCommand.java | 2 +- .../cli/keystore/HasPasswordKeyStoreCommand.java | 2 +- .../org/elasticsearch/cli/keystore/KeyStoreCli.java | 2 +- .../plugins/cli/InstallPluginCommand.java | 2 +- .../elasticsearch/plugins/cli/ListPluginsCommand.java | 2 +- .../java/org/elasticsearch/plugins/cli/PluginCli.java | 2 +- .../elasticsearch/plugins/cli/RemovePluginCommand.java | 2 +- .../src/main/java/org/elasticsearch/cli/Command.java | 2 +- .../cli/EvilEnvironmentAwareCommandTests.java | 1 + server/build.gradle | 10 +--------- .../org/elasticsearch/bootstrap/Elasticsearch.java | 2 +- .../cluster/coordination/ElasticsearchNodeCommand.java | 2 +- .../cluster/coordination/NodeToolCli.java | 2 +- .../{ => common}/cli/CommandLoggingConfigurator.java | 2 +- .../{ => common}/cli/EnvironmentAwareCommand.java | 6 +++++- .../{ => common}/cli/KeyStoreAwareCommand.java | 7 +++++-- .../{ => common}/cli/LoggingAwareCommand.java | 4 +++- .../{ => common}/cli/LoggingAwareMultiCommand.java | 4 +++- .../org/elasticsearch/index/shard/ShardToolCli.java | 2 +- .../license/licensor/tools/KeyPairGeneratorTool.java | 2 +- .../license/licensor/tools/LicenseGeneratorTool.java | 2 +- .../licensor/tools/LicenseVerificationTool.java | 2 +- .../xpack/security/cli/AutoConfigureNode.java | 2 +- .../xpack/security/cli/CertificateGenerateTool.java | 2 +- .../xpack/security/cli/CertificateTool.java | 4 ++-- .../xpack/security/cli/HttpCertificateCommand.java | 2 +- .../authc/esnative/tool/SetupPasswordTool.java | 4 ++-- .../xpack/security/authc/file/tool/UsersTool.java | 4 ++-- .../xpack/security/authc/saml/SamlMetadataCommand.java | 2 +- .../xpack/security/authc/service/FileTokensTool.java | 4 ++-- .../xpack/security/crypto/tool/SystemKeyTool.java | 2 +- .../tool/AutoConfigGenerateElasticPasswordHash.java | 2 +- .../xpack/security/tool/BaseRunAsSuperuserCommand.java | 2 +- .../main/java/org/elasticsearch/xpack/sql/cli/Cli.java | 2 +- .../watcher/trigger/schedule/tool/CronEvalTool.java | 2 +- 36 files changed, 52 insertions(+), 48 deletions(-) rename server/src/main/java/org/elasticsearch/{ => common}/cli/CommandLoggingConfigurator.java (97%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/EnvironmentAwareCommand.java (96%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/KeyStoreAwareCommand.java (92%) rename server/src/main/java/org/elasticsearch/{ => common}/cli/LoggingAwareCommand.java (93%) rename 
server/src/main/java/org/elasticsearch/{ => common}/cli/LoggingAwareMultiCommand.java (93%) diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java index 268cafe16bf1b..f694e8586e6b6 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/BaseKeyStoreCommand.java @@ -12,9 +12,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java index 4ad64d8595df1..b78971932b234 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/CreateKeyStoreCommand.java @@ -12,9 +12,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java index 6a25a84637888..9e8667cc77dae 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/HasPasswordKeyStoreCommand.java @@ -10,9 +10,9 @@ import joptsimple.OptionSet; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java index d751485c0922d..710531a1999ab 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java +++ b/distribution/tools/keystore-cli/src/main/java/org/elasticsearch/cli/keystore/KeyStoreCli.java @@ -8,8 +8,8 @@ package org.elasticsearch.cli.keystore; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; /** * A cli tool for managing secrets in the elasticsearch keystore. 
diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java index a671be0fe45f9..2f72833c65703 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/InstallPluginCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginInfo; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java index aebb33447c0f4..0aed104c7117c 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/ListPluginsCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import org.elasticsearch.Version; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.PluginInfo; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java index bdf3f8395e0e8..f5e5b6136a5b4 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginCli.java @@ -9,8 +9,8 @@ package org.elasticsearch.plugins.cli; import org.elasticsearch.cli.Command; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.core.internal.io.IOUtils; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java index 0cb0c927f18d4..5654984303116 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/RemovePluginCommand.java @@ -11,8 +11,8 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.env.Environment; import java.util.Arrays; diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java index 07b5c17c04cf4..1df7e9432f3a1 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java @@ -93,7 +93,7 @@ public final int main(String[] args, Terminal terminal) throws Exception { /** * Executes the command, but all errors are thrown. 
*/ - void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + protected void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { final OptionSet options = parser.parse(args); if (options.has(helpOption)) { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java index c917e681dd963..df43f0ce2da0e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilEnvironmentAwareCommandTests.java @@ -11,6 +11,7 @@ import joptsimple.OptionSet; import org.apache.lucene.util.TestRuleRestoreSystemProperties; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; diff --git a/server/build.gradle b/server/build.gradle index 1d6a1f4f7689a..c9c7cde70a1c9 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -258,13 +258,5 @@ tasks.named('splitPackagesAudit').configure { // Lucene packages should be owned by Lucene! // These are tricky because Lucene itself splits the index package, // but this should be fixed in Lucene 9 - ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper', - - // cli is owned by the libs/cli, so these should be moved to o.e.server.cli - 'org.elasticsearch.cli.CommandLoggingConfigurator', - 'org.elasticsearch.cli.EnvironmentAwareCommand', - 'org.elasticsearch.cli.KeyStoreAwareCommand', - 'org.elasticsearch.cli.LoggingAwareCommand', - 'org.elasticsearch.cli.LoggingAwareMultiCommand' - + ignoreClasses 'org.apache.lucene.index.LazySoftDeletesDirectoryReaderWrapper' } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java index ae40ea349edb8..c3c864a7d1a44 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java @@ -14,10 +14,10 @@ import joptsimple.util.PathConverter; import org.elasticsearch.Build; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.env.Environment; import org.elasticsearch.monitor.jvm.JvmInfo; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 1e37f1b654f2d..c7a93007e979b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -16,7 +16,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.Condition; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.cluster.ClusterModule; @@ -27,6 +26,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; import 
org.elasticsearch.cluster.metadata.DataStreamMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index 485da303cce6d..89b2fde4ab38a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -7,9 +7,9 @@ */ package org.elasticsearch.cluster.coordination; -import org.elasticsearch.cli.CommandLoggingConfigurator; import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.CommandLoggingConfigurator; import org.elasticsearch.env.NodeRepurposeCommand; import org.elasticsearch.env.OverrideNodeVersionCommand; diff --git a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java b/server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java similarity index 97% rename from server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java rename to server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java index 3053d1cb92201..41a077cd769f5 100644 --- a/server/src/main/java/org/elasticsearch/cli/CommandLoggingConfigurator.java +++ b/server/src/main/java/org/elasticsearch/common/cli/CommandLoggingConfigurator.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import org.apache.logging.log4j.Level; import org.elasticsearch.common.logging.LogConfigurator; diff --git a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java similarity index 96% rename from server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java index b35899f098f7f..ed2429bf72fdc 100644 --- a/server/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/EnvironmentAwareCommand.java @@ -6,12 +6,16 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.util.KeyValuePair; +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.env.Environment; diff --git a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java similarity index 92% rename from server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java index a103b379ae7e7..3067d477d9cb0 100644 --- a/server/src/main/java/org/elasticsearch/cli/KeyStoreAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/KeyStoreAwareCommand.java @@ -6,10 +6,13 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; import joptsimple.OptionSet; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; @@ -19,7 +22,7 @@ import java.util.Arrays; /** - * An {@link org.elasticsearch.cli.EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly + * An {@link EnvironmentAwareCommand} that needs to access the elasticsearch keystore, possibly * decrypting it if it is password protected. */ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand { diff --git a/server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java similarity index 93% rename from server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java index fa9c20d61f607..9682a5680eb05 100644 --- a/server/src/main/java/org/elasticsearch/cli/LoggingAwareCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareCommand.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; + +import org.elasticsearch.cli.Command; /** * A command that is aware of logging. This class should be preferred over the base {@link Command} class for any CLI tools that depend on diff --git a/server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java similarity index 93% rename from server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java rename to server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java index fe005522735f4..d3996d815d1da 100644 --- a/server/src/main/java/org/elasticsearch/cli/LoggingAwareMultiCommand.java +++ b/server/src/main/java/org/elasticsearch/common/cli/LoggingAwareMultiCommand.java @@ -6,7 +6,9 @@ * Side Public License, v 1. */ -package org.elasticsearch.cli; +package org.elasticsearch.common.cli; + +import org.elasticsearch.cli.MultiCommand; /** * A multi-command that is aware of logging. 
This class should be preferred over the base {@link MultiCommand} class for any CLI tools that diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java index c45cac1c081c4..306db0025c6e0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardToolCli.java @@ -7,8 +7,8 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; /** * Class encapsulating and dispatching commands from the {@code elasticsearch-shard} command line tool diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java index a4e69f0c1ab87..6cfb3184e73be 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/KeyPairGeneratorTool.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java index aa1f9bb58471c..4dc1b17b2f9f6 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseGeneratorTool.java @@ -10,11 +10,11 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.license.License; diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java index 616ff9dff9ee9..87aefeefd6aae 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java @@ -10,11 +10,11 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; import 
org.elasticsearch.license.License; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java index e3053eaa00378..bea70937d1876 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/AutoConfigureNode.java @@ -16,7 +16,6 @@ import org.bouncycastle.asn1.x509.GeneralNames; import org.bouncycastle.openssl.jcajce.JcaPEMWriter; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; @@ -25,6 +24,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkUtils; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index 8a84cbdcb7025..d51d2032617f6 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -19,11 +19,11 @@ import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; import org.elasticsearch.common.util.set.Sets; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index c4a9b1119d55e..fea09bf252063 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -21,13 +21,13 @@ import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; import 
org.elasticsearch.common.util.set.Sets; diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java index 5cbdb67e53658..5a1d7c270c1d7 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/HttpCertificateCommand.java @@ -21,12 +21,12 @@ import org.bouncycastle.util.io.pem.PemObjectGenerator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.SuppressForbidden; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.ssl.PemUtils; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 5346e2b436902..a1b14bf8db899 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -12,13 +12,13 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.Terminal.Verbosity; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index b77e3b334332d..416f71911f0dc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -10,12 +10,12 @@ import joptsimple.OptionSpec; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index cd74a39e7bc42..15ed84168ae96 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -14,11 +14,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.SuppressForbidden; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java index b8b4a0f634e90..79e5f74c39b78 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/FileTokensTool.java @@ -10,12 +10,12 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareMultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; +import org.elasticsearch.common.cli.LoggingAwareMultiCommand; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java index 098157b2d26c1..0c0b151f8cbb4 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/crypto/tool/SystemKeyTool.java @@ -10,10 +10,10 @@ import joptsimple.OptionSpec; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java index b762f7648775b..cfe1262057883 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/AutoConfigGenerateElasticPasswordHash.java @@ -10,9 +10,9 @@ import joptsimple.OptionSet; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.env.Environment; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java index 22f01d57ee91d..6909da4df03bb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java @@ -12,9 +12,9 @@ import org.elasticsearch.Version; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.KeyStoreAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.KeyStoreAwareCommand; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java index d60e83bc4b536..c87ce9ae2ddcf 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/Cli.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.xpack.sql.cli.command.ClearScreenCliCommand; import org.elasticsearch.xpack.sql.cli.command.CliCommand; import org.elasticsearch.xpack.sql.cli.command.CliCommands; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java index de1596412daff..45832d418ec94 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/tool/CronEvalTool.java @@ -10,9 +10,9 @@ import joptsimple.OptionSpec; import org.elasticsearch.cli.ExitCodes; -import org.elasticsearch.cli.LoggingAwareCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.xpack.core.scheduler.Cron; From accff0607af5421e10447dbc73a1036b4dd89dc3 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 25 Nov 2021 
16:26:51 +0000 Subject: [PATCH 53/88] Remove superfluous lucene PostingsFormat service configuration (#81049) This PR simply removes the org.apache.lucene.codecs.PostingsFormat service configuration file from the elasticsearch server. The service implementation is part of Lucene and is already configured by Lucene itself. --- .../META-INF/services/org.apache.lucene.codecs.PostingsFormat | 1 - 1 file changed, 1 deletion(-) delete mode 100644 server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat deleted file mode 100644 index 2c92f0ecd3f51..0000000000000 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ /dev/null @@ -1 +0,0 @@ -org.apache.lucene.search.suggest.document.Completion50PostingsFormat From 8ab03d021c4bc2d6c0c2e182bf8e424e04850794 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 08:40:50 -0800 Subject: [PATCH 54/88] [DOCS] Edits reset transforms API (#81027) --- docs/reference/rest-api/common-parms.asciidoc | 4 ++-- .../transform/apis/reset-transform.asciidoc | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index c0fe9c9d244f8..8122ad6bd7032 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1108,8 +1108,8 @@ end::timeoutparms[] tag::transform-id[] Identifier for the {transform}. This identifier can contain lowercase -alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start -and end with alphanumeric characters. +alphanumeric characters (a-z and 0-9), hyphens, and underscores. It has a 64 +character limit and must start and end with alphanumeric characters. end::transform-id[] tag::transform-id-wildcard[] diff --git a/docs/reference/transform/apis/reset-transform.asciidoc b/docs/reference/transform/apis/reset-transform.asciidoc index 26be42bd0b371..e4d142970e828 100644 --- a/docs/reference/transform/apis/reset-transform.asciidoc +++ b/docs/reference/transform/apis/reset-transform.asciidoc @@ -8,7 +8,7 @@ Reset {transform} ++++ -Resets an existing {transform}. +Resets a {transform}. [[reset-transform-request]] == {api-request-title} @@ -20,7 +20,14 @@ Resets an existing {transform}. * Requires the `manage_transform` cluster privilege. This privilege is included in the `transform_admin` built-in role. -* Before you can reset the {transform}, you must stop it; alternatively, use the `force` query parameter. + +[[reset-transform-desc]] == {api-description-title} Before you can reset the {transform}, you must stop it; alternatively, use the `force` query parameter. If the destination index was created by the transform, it is deleted. [[reset-transform-path-parms]] == {api-path-parms-title} @@ -33,9 +40,10 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=transform-id] == {api-query-parms-title} `force`:: -(Optional, Boolean) When `true`, the {transform} is reset regardless of its -current state. The default value is `false`, meaning that the {transform} must be -`stopped` before it can be reset. +(Optional, Boolean) +If this value is `true`, the {transform} is reset regardless of its current +state. If it's false, the {transform} must be `stopped` before it can be reset.
+The default value is `false` [[reset-transform-examples]] == {api-examples-title} From 8da1236bca03f38b694e4e83a97fb91aca68b1dd Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 09:08:46 -0800 Subject: [PATCH 55/88] [DOCS] Clarify impact of force stop trained model deployment (#81026) --- .../df-analytics/apis/stop-trained-model-deployment.asciidoc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc index a486ee37bb239..c3a17da0c5322 100644 --- a/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/df-analytics/apis/stop-trained-model-deployment.asciidoc @@ -43,7 +43,8 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-deployments] `force`:: (Optional, Boolean) If true, the deployment is stopped even if it is referenced -by ingest pipelines. +by ingest pipelines. You can't use these pipelines until you restart the model +deployment. //// [role="child_attributes"] From e5de9d8ad7693691739ef1b3fc43ff9943afb737 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Thu, 25 Nov 2021 10:06:52 -0800 Subject: [PATCH 56/88] [DOCS] Add actual and typical values in ML alerting docs (#80571) --- .../ml/anomaly-detection/ml-configuring-alerts.asciidoc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc index 6dd13006f4601..3844d5fcd7aed 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-alerts.asciidoc @@ -228,6 +228,9 @@ The list of top records. .Properties of `context.topRecords` [%collapsible%open] ==== +`actual`::: +The actual value for the bucket. + `by_field_value`::: The value of the by field. @@ -248,6 +251,9 @@ The field used to segment the analysis. `score`::: A normalized score between 0-100, which is based on the probability of the anomalousness of this record. + +`typical`::: +The typical value for the bucket, according to analytical modeling. ==== [[anomaly-jobs-health-action-variables]] From d51678562e0b79a87f2c18bfae207385997cf311 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 25 Nov 2021 19:48:12 +0000 Subject: [PATCH 57/88] [ML] Expected snapshot min version can now be 8 (#81054) Followup to elastic/ml-cpp#2139 Fixes #81055 Fixes #81070 --- build.gradle | 4 ++-- .../elasticsearch/common/compress/CompressedXContent.java | 4 ++-- .../elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/build.gradle b/build.gradle index 9d5ab644667cc..59fc13e9e1ac8 100644 --- a/build.gradle +++ b/build.gradle @@ -131,9 +131,9 @@ tasks.register("verifyVersions") { * after the backport of the backcompat code is complete. */ -boolean bwc_tests_enabled = false +boolean bwc_tests_enabled = true // place a PR link here when committing bwc changes: -String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/80348" +String bwc_tests_disabled_issue = "" /* * FIPS 140-2 behavior was fixed in 7.11.0. Before that there is no way to run elasticsearch in a * JVM that is properly configured to be in fips mode with BCFIPS. 
For now we need to disable diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index d43c78792938a..fa1a4421651ca 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -169,7 +169,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getVersion().onOrAfter(Version.V_8_1_0)) { + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -181,7 +181,7 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java index 70210a8ffae8f..b65411d82cb10 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlJobSnapshotUpgradeIT.java @@ -153,8 +153,8 @@ private void testSnapshotUpgrade() throws Exception { GetModelSnapshotsResponse modelSnapshots = getModelSnapshots(job.getId()); assertThat(modelSnapshots.snapshots(), hasSize(2)); - assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo((byte) 7)); - assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo((byte) 7)); + assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); + assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); ModelSnapshot snapshot = modelSnapshots.snapshots() .stream() @@ -237,8 +237,8 @@ private void createJobAndSnapshots() throws Exception { GetModelSnapshotsResponse modelSnapshots = getModelSnapshots(job.getId()); assertThat(modelSnapshots.snapshots(), hasSize(2)); - assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo((byte) 7)); - assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo((byte) 7)); + assertThat(modelSnapshots.snapshots().get(0).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); + assertThat(modelSnapshots.snapshots().get(1).getMinVersion().major, equalTo(UPGRADE_FROM_VERSION.major)); } private PutJobResponse buildAndPutJob(String jobId, TimeValue bucketSpan) throws Exception { From d29da0270d080e39c8d02df7c831a5e0fe8b377c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Thu, 25 Nov 2021 21:35:35 +0100 Subject: [PATCH 58/88] Make Circuit Breaker Lookup in BigArrays Faster (#81033) I noticed that when benchmarking translog writes (which currently involve copying a lot of bytes across 2 pooled buffers backed by `BigArrays`), a non-trivial amount of time is spent on looking up the circuit breaker. This PR makes that lookup faster in general by using a more efficient map, but it also caches the breaker instance on `BigArrays` itself so the lookup doesn't have to happen for every 16k page copied during a multi-MB write in the first place.
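Distilled into a sketch, the hot-path part of the change is the usual hoist-the-lookup idiom. Signatures are simplified here and the "<reused_arrays>" breaker label is an assumption, not necessarily the exact string in the code:

    // Before: a breaker-service map lookup on every size adjustment.
    void adjustBreaker(long delta) {
        CircuitBreaker breaker = breakerService.getBreaker(breakerName); // lookup per call
        breaker.addEstimateBytesAndMaybeBreak(delta, "<reused_arrays>");
    }

    // After: resolve the breaker once at construction time; the set of breakers
    // is fixed at node startup, so the cached reference can never go stale.
    private final CircuitBreaker breaker =
        breakerService == null ? null : breakerService.getBreaker(breakerName);

    void adjustBreaker(long delta) {
        if (breaker != null) {
            breaker.addEstimateBytesAndMaybeBreak(delta, "<reused_arrays>");
        }
    }

The Map.copyOf change in HierarchyCircuitBreakerService serves the lookups that remain: it swaps the unmodifiable wrapper around a HashMap for a compact immutable map.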
--- .../java/org/elasticsearch/common/util/BigArrays.java | 11 +++++++++-- .../breaker/HierarchyCircuitBreakerService.java | 3 +-- .../breaker/HierarchyCircuitBreakerServiceTests.java | 6 +++--- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java index e4739879073e1..9b8b20d4cae55 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/server/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -391,7 +391,10 @@ public T set(long index, T value) { } final PageCacheRecycler recycler; + @Nullable private final CircuitBreakerService breakerService; + @Nullable + private final CircuitBreaker breaker; private final boolean checkBreaker; private final BigArrays circuitBreakingInstance; private final String breakerName; @@ -410,6 +413,11 @@ protected BigArrays( this.checkBreaker = checkBreaker; this.recycler = recycler; this.breakerService = breakerService; + if (breakerService != null) { + breaker = breakerService.getBreaker(breakerName); + } else { + breaker = null; + } this.breakerName = breakerName; if (checkBreaker) { this.circuitBreakingInstance = this; @@ -427,8 +435,7 @@ protected BigArrays( * we do not add the delta to the breaker if it trips. */ void adjustBreaker(final long delta, final boolean isDataAlreadyCreated) { - if (this.breakerService != null) { - CircuitBreaker breaker = this.breakerService.getBreaker(breakerName); + if (this.breaker != null) { if (this.checkBreaker) { // checking breaker means potentially tripping, but it doesn't // have to if the delta is negative diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 28c5bf3a9a985..8df5e9e8834a1 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -31,7 +31,6 @@ import java.lang.management.ManagementFactory; import java.lang.management.MemoryMXBean; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -204,7 +203,7 @@ public HierarchyCircuitBreakerService(Settings settings, List c } childCircuitBreakers.put(breakerSettings.getName(), validateAndCreateBreaker(breakerSettings)); } - this.breakers = Collections.unmodifiableMap(childCircuitBreakers); + this.breakers = Map.copyOf(childCircuitBreakers); this.parentSettings = new BreakerSettings( CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 66b7c47b8eedd..487a0e3a34720 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -243,7 +243,7 @@ public void testBorrowingSiblingBreakerMemory() {
assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); assertThat( exception.getMessage(), - containsString("usages [request=157286400/150mb, fielddata=54001664/51.5mb, inflight_requests=0/0b]") + containsString("usages [fielddata=54001664/51.5mb, request=157286400/150mb, inflight_requests=0/0b]") ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } } @@ -305,11 +305,11 @@ long currentMemoryUsage() { assertThat( exception.getMessage(), containsString( - "usages [request=" + "usages [fielddata=0/0b, request=" + requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed) - + ", fielddata=0/0b, inflight_requests=0/0b]" + + ", inflight_requests=0/0b]" ) ); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); From 7ce8054f1f0212acaceaa0b9712b5ed049c46fec Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Fri, 26 Nov 2021 06:41:24 +0100 Subject: [PATCH 59/88] Fix Failures in HierarchyCircuitBreakerServiceTests (#81073) In #81033 the type of map that is iterated over to compute the various usage values was changed. This changed the order of the various breakers in the exception string and made it less deterministic (although it was always just deterministic by accident in these tests), so we shouldn't assume any order in the assertions. --- .../HierarchyCircuitBreakerServiceTests.java | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index 487a0e3a34720..819ded625d1b9 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -241,10 +241,10 @@ public void testBorrowingSiblingBreakerMemory() { ); assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be")); assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]")); - assertThat( - exception.getMessage(), - containsString("usages [fielddata=54001664/51.5mb, request=157286400/150mb, inflight_requests=0/0b]") - ); + assertThat(exception.getMessage(), containsString("usages [")); + assertThat(exception.getMessage(), containsString("fielddata=54001664/51.5mb")); + assertThat(exception.getMessage(), containsString("inflight_requests=0/0b")); + assertThat(exception.getMessage(), containsString("request=157286400/150mb")); assertThat(exception.getDurability(), equalTo(CircuitBreaker.Durability.TRANSIENT)); } } @@ -302,16 +302,13 @@ long currentMemoryUsage() { ) ); final long requestCircuitBreakerUsed = (requestBreaker.getUsed() + reservationInBytes) * 2; + assertThat(exception.getMessage(), containsString("usages [")); + assertThat(exception.getMessage(), containsString("fielddata=0/0b")); assertThat( exception.getMessage(), - containsString( - "usages [fielddata=0/0b, request=" - + requestCircuitBreakerUsed - + "/" - + new ByteSizeValue(requestCircuitBreakerUsed) - + ", inflight_requests=0/0b]" - ) + containsString("request=" + requestCircuitBreakerUsed + "/" + new ByteSizeValue(requestCircuitBreakerUsed)) ); + assertThat(exception.getMessage(), containsString("inflight_requests=0/0b")); assertThat(exception.getDurability(), 
equalTo(CircuitBreaker.Durability.TRANSIENT)); assertEquals(0, requestBreaker.getTrippedCount()); assertEquals(1, service.stats().getStats(CircuitBreaker.PARENT).getTrippedCount()); From f3b5299a2077c24ac98fd478fd3bcdc561acd13b Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 29 Nov 2021 09:38:58 +0100 Subject: [PATCH 60/88] Move TransportGetLicenseAction to SAME Threadpool (#80993) This is motivated by a number of recent SDHs that had these transport actions queue up on the management pool. They were not the reason for the blockage on the management queue, but they are often sent at a high rate by Beats in the same scenarios that also see a high rate of stats requests. Moving them off the management pool at least makes sure that we don't get Beats retrying them over and over on slowness, and it generally saves some resources by avoiding context switches and by not keeping these requests alive for longer than necessary. There's no point in running this on the management pool: it should already have been fast enough for SAME, with the possible exception of reading the public key from disk. The public key is now a constant, so it doesn't have to be read and deserialized over and over, and the verified property is cached per `License` instance so it should almost never have to be recomputed in practice. --- .../tools/LicenseVerificationTool.java | 3 ++- .../licensor/LicenseVerificationTests.java | 11 +++++++--- .../org/elasticsearch/license/License.java | 18 ++++++++++++++++ .../elasticsearch/license/LicenseService.java | 14 ++++++------- .../license/LicenseVerifier.java | 21 ++++++++++++------- .../license/LicensesMetadata.java | 2 +- .../license/TransportDeleteLicenseAction.java | 1 - .../license/TransportGetLicenseAction.java | 8 ++----- .../license/LicensesManagerServiceTests.java | 3 +-- 9 files changed, 51 insertions(+), 30 deletions(-) diff --git a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java index 87aefeefd6aae..1059b100fc396 100644 --- a/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java +++ b/x-pack/license-tools/src/main/java/org/elasticsearch/license/licensor/tools/LicenseVerificationTool.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.cli.LoggingAwareCommand; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.license.CryptUtils; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseVerifier; import org.elasticsearch.xcontent.ToXContent; @@ -70,7 +71,7 @@ protected void execute(Terminal terminal, OptionSet options) throws Exception { } // verify - if (LicenseVerifier.verifyLicense(licenseSpec, Files.readAllBytes(publicKeyPath)) == false) { + if (LicenseVerifier.verifyLicense(licenseSpec, CryptUtils.readPublicKey(Files.readAllBytes(publicKeyPath))) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Invalid License!"); } XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java index bed6e471c1c53..57d0a74ebc0b9 100644 --- a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java +++ 
b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/LicenseVerificationTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.license.licensor; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.license.CryptUtils; import org.elasticsearch.license.DateUtils; import org.elasticsearch.license.License; import org.elasticsearch.license.LicenseVerifier; @@ -16,28 +17,32 @@ import java.nio.file.Files; import java.nio.file.Path; +import java.security.PublicKey; public class LicenseVerificationTests extends ESTestCase { protected Path pubKeyPath = null; + protected PublicKey publicKey; protected Path priKeyPath = null; @Before public void setup() throws Exception { pubKeyPath = getDataPath("/public.key"); + publicKey = CryptUtils.readPublicKey(Files.readAllBytes(pubKeyPath)); priKeyPath = getDataPath("/private.key"); } @After public void cleanUp() { pubKeyPath = null; + publicKey = null; priKeyPath = null; } public void testGeneratedLicenses() throws Exception { final TimeValue fortyEightHours = TimeValue.timeValueHours(2 * 24); final License license = TestUtils.generateSignedLicense(fortyEightHours, pubKeyPath, priKeyPath); - assertTrue(LicenseVerifier.verifyLicense(license, Files.readAllBytes(pubKeyPath))); + assertTrue(LicenseVerifier.verifyLicense(license, publicKey)); } public void testLicenseTampering() throws Exception { @@ -50,7 +55,7 @@ public void testLicenseTampering() throws Exception { .validate() .build(); - assertFalse(LicenseVerifier.verifyLicense(tamperedLicense, Files.readAllBytes(pubKeyPath))); + assertFalse(LicenseVerifier.verifyLicense(tamperedLicense, publicKey)); } public void testRandomLicenseVerification() throws Exception { @@ -58,7 +63,7 @@ public void testRandomLicenseVerification() throws Exception { randomIntBetween(License.VERSION_START, License.VERSION_CURRENT) ); License generatedLicense = generateSignedLicense(licenseSpec, pubKeyPath, priKeyPath); - assertTrue(LicenseVerifier.verifyLicense(generatedLicense, Files.readAllBytes(pubKeyPath))); + assertTrue(LicenseVerifier.verifyLicense(generatedLicense, publicKey)); } private static License generateSignedLicense(TestUtils.LicenseSpec spec, Path pubKeyPath, Path priKeyPath) throws Exception { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java index b62afdd39c818..6026512c712dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/License.java @@ -471,6 +471,24 @@ private static void validateLimits(String type, int maxNodes, int maxResourceUni } } + private Boolean isVerified; + + public boolean verified() { + final Boolean v = isVerified; + if (v != null) { + return v; + } + final boolean verified = doVerify(); + this.isVerified = verified; + return verified; + } + + private boolean doVerify() { + boolean autoGeneratedLicense = License.isAutoGeneratedLicense(signature()); + return (autoGeneratedLicense && SelfGeneratedLicense.verify(this)) + || (autoGeneratedLicense == false && LicenseVerifier.verifyLicense(this)); + } + public static License readLicense(StreamInput in) throws IOException { int version = in.readVInt(); // Version for future extensibility if (version > VERSION_CURRENT) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 
343520657ebab..d294370979f2d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -23,11 +23,11 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.protocol.xpack.license.PutLicenseResponse; import org.elasticsearch.threadpool.ThreadPool; @@ -110,7 +110,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste * Currently active license */ private final AtomicReference currentLicense = new AtomicReference<>(); - private SchedulerEngine scheduler; + private final SchedulerEngine scheduler; private final Clock clock; /** @@ -121,7 +121,7 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste /** * Callbacks to notify relative to license expiry */ - private List expirationCallbacks = new ArrayList<>(); + private final List expirationCallbacks = new ArrayList<>(); /** * Which license types are permitted to be uploaded to the cluster @@ -362,7 +362,7 @@ public void triggered(SchedulerEngine.Event event) { /** * Remove license from the cluster state metadata */ - public void removeLicense(final DeleteLicenseRequest request, final ActionListener listener) { + public void removeLicense(final ActionListener listener) { final PostStartBasicRequest startBasicRequest = new PostStartBasicRequest().acknowledge(true); clusterService.submitStateUpdateTask( "delete license", @@ -609,15 +609,13 @@ public static License getLicense(final Metadata metadata) { return getLicense(licensesMetadata); } - static License getLicense(final LicensesMetadata metadata) { + static License getLicense(@Nullable final LicensesMetadata metadata) { if (metadata != null) { License license = metadata.getLicense(); if (license == LicensesMetadata.LICENSE_TOMBSTONE) { return license; } else if (license != null) { - boolean autoGeneratedLicense = License.isAutoGeneratedLicense(license.signature()); - if ((autoGeneratedLicense && SelfGeneratedLicense.verify(license)) - || (autoGeneratedLicense == false && LicenseVerifier.verifyLicense(license))) { + if (license.verified()) { return license; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java index f31c7096bae68..0daf6811959ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseVerifier.java @@ -21,6 +21,7 @@ import java.nio.ByteBuffer; import java.security.InvalidKeyException; import java.security.NoSuchAlgorithmException; +import java.security.PublicKey; import java.security.Signature; import java.security.SignatureException; import java.util.Arrays; @@ -38,7 +39,7 @@ public class LicenseVerifier { * @param license to verify * @return true if valid, false otherwise */ - public static boolean verifyLicense(final License license, byte[] publicKeyData) { + public static boolean 
verifyLicense(final License license, PublicKey publicKey) { byte[] signedContent = null; byte[] publicKeyFingerprint = null; try { @@ -58,7 +59,7 @@ public static boolean verifyLicense(final License license, byte[] publicKeyData) XContentBuilder contentBuilder = XContentFactory.contentBuilder(XContentType.JSON); license.toXContent(contentBuilder, new ToXContent.MapParams(Collections.singletonMap(License.LICENSE_SPEC_VIEW_MODE, "true"))); Signature rsa = Signature.getInstance("SHA512withRSA"); - rsa.initVerify(CryptUtils.readPublicKey(publicKeyData)); + rsa.initVerify(publicKey); BytesRefIterator iterator = BytesReference.bytes(contentBuilder).iterator(); BytesRef ref; while ((ref = iterator.next()) != null) { @@ -74,15 +75,19 @@ public static boolean verifyLicense(final License license, byte[] publicKeyData) } } - public static boolean verifyLicense(final License license) { - final byte[] publicKeyBytes; + private static final PublicKey PUBLIC_KEY; + + static { try (InputStream is = LicenseVerifier.class.getResourceAsStream("/public.key")) { ByteArrayOutputStream out = new ByteArrayOutputStream(); Streams.copy(is, out); - publicKeyBytes = out.toByteArray(); - } catch (IOException ex) { - throw new IllegalStateException(ex); + PUBLIC_KEY = CryptUtils.readPublicKey(out.toByteArray()); + } catch (IOException e) { + throw new AssertionError("key file is part of the source and must deserialize correctly", e); } - return verifyLicense(license, publicKeyBytes); + } + + public static boolean verifyLicense(final License license) { + return verifyLicense(license, PUBLIC_KEY); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java index 10d11553ddfd7..0ff68b6b562f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java @@ -52,7 +52,7 @@ public class LicensesMetadata extends AbstractNamedDiffable .expiryDate(0) .build(); - private License license; + private final License license; // This field describes the version of x-pack for which this cluster has exercised a trial. If the field // is null, then no trial has been exercised. 
We keep the version to leave open the possibility that we diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java index ec1c075f888d9..f715592fc5f6c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportDeleteLicenseAction.java @@ -61,7 +61,6 @@ protected void masterOperation( final ActionListener listener ) throws ElasticsearchException { licenseService.removeLicense( - request, listener.delegateFailure( (l, postStartBasicResponse) -> l.onResponse(AcknowledgedResponse.of(postStartBasicResponse.isAcknowledged())) ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java index 75ecf85968283..988b77595e3f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetLicenseAction.java @@ -23,13 +23,10 @@ public class TransportGetLicenseAction extends TransportMasterNodeReadAction { - private final LicenseService licenseService; - @Inject public TransportGetLicenseAction( TransportService transportService, ClusterService clusterService, - LicenseService licenseService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver @@ -43,9 +40,8 @@ public TransportGetLicenseAction( GetLicenseRequest::new, indexNameExpressionResolver, GetLicenseResponse::new, - ThreadPool.Names.MANAGEMENT + ThreadPool.Names.SAME ); - this.licenseService = licenseService; } @Override @@ -60,6 +56,6 @@ protected void masterOperation( ClusterState state, final ActionListener listener ) throws ElasticsearchException { - listener.onResponse(new GetLicenseResponse(licenseService.getLicense())); + listener.onResponse(new GetLicenseResponse(LicenseService.getLicense(state.metadata()))); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java index 7b6162211189d..4595694d99f1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; import org.elasticsearch.protocol.xpack.license.LicensesStatus; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; @@ -125,7 +124,7 @@ public void testRemoveLicenses() throws Exception { private void removeAndAckSignedLicenses(final LicenseService licenseService) { final CountDownLatch latch = new CountDownLatch(1); final AtomicBoolean success = new AtomicBoolean(false); - licenseService.removeLicense(new DeleteLicenseRequest(), new ActionListener() { + licenseService.removeLicense(new ActionListener() { @Override public void onResponse(PostStartBasicResponse postStartBasicResponse) { if (postStartBasicResponse.isAcknowledged()) {
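The caching introduced by this patch follows a pattern worth spelling out: load an immutable classpath resource once in a static initializer, and memoize an idempotent check in a plain field, where the worst case under a race is a harmless recomputation. Below is a minimal, self-contained sketch of that pattern; the class, the X.509 key spec, and the method names are illustrative assumptions, not the actual CryptUtils/License API.

import java.io.IOException;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.PublicKey;
import java.security.spec.X509EncodedKeySpec;

public final class CachedVerifier {

    // Loaded once per JVM. The key ships inside the jar, so a failure here is a
    // packaging bug, not a runtime condition worth recovering from.
    private static final PublicKey PUBLIC_KEY;

    static {
        try (InputStream is = CachedVerifier.class.getResourceAsStream("/public.key")) {
            PUBLIC_KEY = KeyFactory.getInstance("RSA").generatePublic(new X509EncodedKeySpec(is.readAllBytes()));
        } catch (IOException | GeneralSecurityException e) {
            throw new AssertionError("bundled key must always load", e);
        }
    }

    // Lazily cached result of an idempotent check. The field is deliberately not
    // volatile: two racing threads may both compute the value, but they compute
    // the same answer, so the race is benign. License#verified above works the same way.
    private Boolean verified;

    public boolean verified() {
        Boolean v = verified;
        if (v == null) {
            v = expensiveSignatureCheck(PUBLIC_KEY);
            verified = v;
        }
        return v;
    }

    private boolean expensiveSignatureCheck(PublicKey key) {
        return true; // stand-in for the real SHA512withRSA verification
    }
}

Note that the real License#verified additionally routes auto-generated licenses through SelfGeneratedLicense.verify before falling back to the RSA path, as the diff above shows.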
From 5cb5d921b68f41b0c4a559f2360dcb78f666ac61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Francisco=20Fern=C3=A1ndez=20Casta=C3=B1o?= Date: Mon, 29 Nov 2021 11:12:14 +0100 Subject: [PATCH 61/88] Fix race condition in SnapshotBasedIndexRecoveryIT (#79404) If we don't cancel the relocation of the index to the same target node, the recovery may be retried, meaning that the available permit may be granted to indexRecoveredFromSnapshot1 instead of to indexRecoveredFromSnapshot2. Relates #79316 Closes #79420 --- .../recovery/PeerRecoveryTargetService.java | 7 +++++- .../SnapshotBasedIndexRecoveryIT.java | 23 ++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 632e495e15a71..968555e0628b3 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -169,7 +169,7 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { - final Releasable snapshotFileDownloadsPermit = recoverySettings.tryAcquireSnapshotDownloadPermits(); + final Releasable snapshotFileDownloadsPermit = tryAcquireSnapshotDownloadPermits(); // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery( indexShard, @@ -258,6 +258,11 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi ); } + // Visible for testing + public Releasable tryAcquireSnapshotDownloadPermits() { + return recoverySettings.tryAcquireSnapshotDownloadPermits(); + } + /** * Prepare the start recovery request. 
* diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index a76cc2c6018c7..40bc86fbf77b9 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Releasable; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.MergePolicyConfig; @@ -85,6 +86,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; +import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS; import static org.elasticsearch.indices.recovery.RecoverySettings.INDICES_RECOVERY_MAX_CONCURRENT_SNAPSHOT_FILE_DOWNLOADS_PER_NODE; @@ -914,7 +916,6 @@ public void testRecoveryUsingSnapshotsIsThrottledPerNode() throws Exception { ); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/79420") public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation() throws Exception { executeRecoveryWithSnapshotFileDownloadThrottled( ( @@ -930,7 +931,12 @@ public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation client().admin() .indices() .prepareUpdateSettings(indexRecoveredFromSnapshot1) - .setSettings(Settings.builder().put("index.routing.allocation.require._name", targetNode)) + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.require._name", (String) null) + .put("index.routing.allocation.include._name", sourceNode + "," + targetNode) + ) .get() ); @@ -963,6 +969,16 @@ public void testRecoveryUsingSnapshotsPermitIsReturnedAfterFailureOrCancellation targetMockTransportService.clearAllRules(); channelRef.get().sendResponse(new IOException("unable to clean files")); + PeerRecoveryTargetService peerRecoveryTargetService = internalCluster().getInstance( + PeerRecoveryTargetService.class, + targetNode + ); + assertBusy(() -> { + // Wait until the current RecoveryTarget releases the snapshot download permit + try (Releasable snapshotDownloadPermit = peerRecoveryTargetService.tryAcquireSnapshotDownloadPermits()) { + assertThat(snapshotDownloadPermit, is(notNullValue())); + } + }); } String indexRecoveredFromSnapshot2 = indices.get(1); @@ -1140,10 +1156,11 @@ private void executeRecoveryWithSnapshotFileDownloadThrottled(SnapshotBasedRecov indexName, Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "1s") .put("index.routing.allocation.require._name", dataNodes.get(0)) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), 0) .build() ); indices.add(indexName);
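The key to the fix is the testing seam added above: with tryAcquireSnapshotDownloadPermits now public, the test can poll until the failed recovery hands its permit back before moving on. Here is a minimal sketch of that polling idea, using a plain Semaphore and AutoCloseable as stand-ins for RecoverySettings and Releasable; all names are illustrative, and the real test expresses the loop with ESTestCase's assertBusy instead.

import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public final class PermitReturnCheck {

    // A single download permit, standing in for the bounded permits that
    // RecoverySettings hands out.
    private static final Semaphore PERMITS = new Semaphore(1);

    // Returns a releasing handle when a permit is available, null otherwise,
    // matching the null contract of tryAcquireSnapshotDownloadPermits above.
    static AutoCloseable tryAcquirePermit() {
        if (PERMITS.tryAcquire()) {
            return PERMITS::release;
        }
        return null;
    }

    // Poll until the permit held by the failed recovery is handed back. The
    // try-with-resources guarantees the probe itself returns the permit, so
    // the check never leaks the very resource it is checking for.
    static void awaitPermitReturned(long timeoutMillis) throws Exception {
        long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
        while (true) {
            try (AutoCloseable permit = tryAcquirePermit()) {
                if (permit != null) {
                    return;
                }
            }
            if (System.nanoTime() > deadline) {
                throw new AssertionError("snapshot download permit was never returned");
            }
            Thread.sleep(50);
        }
    }
}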
"1s") .put("index.routing.allocation.require._name", dataNodes.get(0)) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), 0) .build() ); indices.add(indexName); From c0b4b6080d721991ad7c5d666536b80561ad84bf Mon Sep 17 00:00:00 2001 From: Christos Soulios <1561376+csoulios@users.noreply.github.com> Date: Mon, 29 Nov 2021 12:44:28 +0200 Subject: [PATCH 62/88] TSDB: Add `_tsid` field to time_series indices (#80276) This PR adds support for a field named _tsid that uniquely identifies the time series a document belongs to. When a document is indexed in a time series index (IndexMode.TIME_SERIES), _tsid field is generated from the values of all dimension fields. --- .../upgrades/FullClusterRestartIT.java | 4 +- .../elasticsearch/upgrades/IndexingIT.java | 18 +- ...dimension_and_metric_in_non_tsdb_index.yml | 36 ++ .../rest-api-spec/test/tsdb/10_settings.yml | 2 + .../rest-api-spec/test/tsdb/20_mapping.yml | 5 +- .../rest-api-spec/test/tsdb/30_snapshot.yml | 9 +- .../rest-api-spec/test/tsdb/40_search.yml | 82 ++- .../rest-api-spec/test/tsdb/50_alias.yml | 54 +- .../test/tsdb/60_add_dimensions.yml | 40 +- .../test/tsdb/70_dimension_types.yml | 84 ++- .../test/tsdb/80_index_resize.yml | 22 +- .../org/elasticsearch/index/IndexMode.java | 21 + .../elasticsearch/index/IndexSortConfig.java | 18 +- .../index/mapper/IpFieldMapper.java | 21 +- .../index/mapper/KeywordFieldMapper.java | 34 +- .../index/mapper/LuceneDocument.java | 31 + .../index/mapper/MapperService.java | 9 +- .../index/mapper/NumberFieldMapper.java | 27 +- .../index/mapper/TimeSeriesIdFieldMapper.java | 226 +++++++ .../elasticsearch/indices/IndicesModule.java | 2 + .../elasticsearch/search/DocValueFormat.java | 29 + .../elasticsearch/search/SearchModule.java | 1 + .../bucket/terms/StringTerms.java | 3 + .../index/IndexSortSettingsTests.java | 88 ++- .../mapper/FieldFilterMapperPluginTests.java | 14 +- .../index/mapper/KeywordFieldMapperTests.java | 2 +- .../index/mapper/MappingParserTests.java | 9 +- .../mapper/TimeSeriesIdFieldMapperTests.java | 573 ++++++++++++++++++ .../indices/IndicesModuleTests.java | 2 + .../index/mapper/MapperServiceTestCase.java | 4 + 30 files changed, 1308 insertions(+), 162 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java create mode 100644 server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 745276d3c4145..4f7082e86781c 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -225,7 +225,7 @@ public void testNewReplicas() throws Exception { } public void testSearchTimeSeriesMode() throws Exception { - assumeTrue("time series mode introduced in 8.0.0", getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + assumeTrue("time series index sort by _tsid introduced in 8.1.0", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); int numDocs; if (isRunningAgainstOldCluster()) { numDocs = createTimeSeriesModeIndex(1); @@ -267,7 +267,7 @@ public void testSearchTimeSeriesMode() throws Exception { } public void testNewReplicasTimeSeriesMode() throws Exception { - assumeTrue("time series mode introduced in 8.0.0", 
getOldClusterVersion().onOrAfter(Version.V_8_0_0)); + assumeTrue("time series index sort by _tsid introduced in 8.1.0", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); if (isRunningAgainstOldCluster()) { createTimeSeriesModeIndex(0); } else { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 21367aba17978..52bbd2b41bf9b 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -258,7 +258,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("tsdb added in 8.0.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_0_0)); + assumeTrue("sort by _tsid added in 8.1.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_1_0)); StringBuilder bulk = new StringBuilder(); switch (CLUSTER_TYPE) { @@ -343,20 +343,9 @@ private void assertTsdbAgg(Matcher... expected) throws IOException { Request request = new Request("POST", "/tsdb/_search"); request.addParameter("size", "0"); XContentBuilder body = JsonXContent.contentBuilder().startObject(); - // TODO replace tsid runtime field with real tsid - body.startObject("runtime_mappings"); - { - body.startObject("tsid"); - { - body.field("type", "keyword"); - body.field("script", "emit('dim:' + doc['dim'].value)"); - } - body.endObject(); - } - body.endObject(); body.startObject("aggs").startObject("tsids"); { - body.startObject("terms").field("field", "tsid").endObject(); + body.startObject("terms").field("field", "_tsid").endObject(); body.startObject("aggs").startObject("avg"); { body.startObject("avg").field("field", "value").endObject(); @@ -367,8 +356,7 @@ private void assertTsdbAgg(Matcher... 
expected) throws IOException { request.setJsonEntity(Strings.toString(body.endObject())); ListMatcher tsidsExpected = matchesList(); for (int d = 0; d < expected.length; d++) { - // Object key = Map.of("dim", TSDB_DIMS.get(d)); TODO use this once tsid is real - Object key = "dim:" + TSDB_DIMS.get(d); + Object key = Map.of("dim", TSDB_DIMS.get(d)); tsidsExpected = tsidsExpected.item(matchesMap().extraOk().entry("key", key).entry("avg", Map.of("value", expected[d]))); } assertMap( diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml index ec67748212a5c..7b1b848240d1e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/05_dimension_and_metric_in_non_tsdb_index.yml @@ -178,3 +178,39 @@ can't shadow metrics: runtime_mappings: deep.deeper.deepest: type: keyword + +--- +# Test that _tsid field is not added if an index is not a time-series index +no _tsid in standard indices: + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.create: + index: test + body: + settings: + index: + number_of_replicas: 0 + number_of_shards: 2 + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + + - do: + field_caps: + index: test + fields: [metricset, _tsid] + + - match: {fields.metricset.keyword.searchable: true} + - match: {fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + - is_false: fields._tsid # _tsid metadata field must not exist in non-time-series indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml index 709f633e74820..eed1ccb7247cf 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/10_settings.yml @@ -186,6 +186,8 @@ set start_time and end_time: end_time: 1632625792000 mappings: properties: + "@timestamp": + type: date metricset: type: keyword time_series_dimension: true diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml index c7b8b97b32ff4..1d11cde944d45 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/20_mapping.yml @@ -204,6 +204,9 @@ runtime field matching routing path: properties: "@timestamp": type: date + dim_kw: + type: "keyword" + time_series_dimension: true dim: type: object dynamic: runtime @@ -214,7 +217,7 @@ runtime field matching routing path: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim": {"foo": "a"}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "dim_kw": "dim", "dim": {"foo": "a"}}' - match: {items.0.index.error.reason: "All fields that match routing_path must be keywords with [time_series_dimension: true] and 
without the [script] parameter. [dim.foo] was a runtime [keyword]."} --- diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml index e606e4dd82ca2..cb4c0ce663536 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/30_snapshot.yml @@ -17,8 +17,8 @@ teardown: --- "Create a snapshot and then restore it": - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 features: ["allowed_warnings"] # Create index @@ -134,10 +134,13 @@ teardown: search: index: test_index body: + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - match: {hits.hits.0._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507} - # TODO assert the _tsid once we generate it + - match: {hits.hits.0.fields._tsid: [ { k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod } ] } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml index 223d87ab96a09..ca8b32fb0c89f 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/40_search.yml @@ -90,7 +90,20 @@ query a metric: - match: {hits.total.value: 1} -# TODO add test showing that quering _tsid fails +--- +"query tsid fails": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + catch: /\[_tsid\] is not searchable/ + search: + index: test + body: + query: + term: + _tsid: wont't work --- fetch a dimension: @@ -151,7 +164,24 @@ fetch a tag: - match: {hits.hits.0.fields.k8s\.pod\.ip: ['10.10.55.2']} - is_false: hits.hits.0.fields._tsid # tsid isn't fetched by default -# TODO add test to fetch the tsid +--- +"fetch the tsid": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + fields: + - field: _tsid + query: + query_string: + query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' + + - match: {hits.total.value: 1} + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- aggregate a dimension: @@ -229,23 +259,44 @@ aggregate a tag: - match: {aggregations.ips.buckets.2.key: 10.10.55.3} - match: {aggregations.ips.buckets.2.doc_count: 4} +--- +"aggregate the tsid": + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc -# TODO add a test aggregating the _tsid + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} --- field capabilities: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: field_caps: index: test - fields: [k8s.pod.uid, k8s.pod.network.rx, 
k8s.pod.ip, _tsid] + fields: [k8s.pod.uid, k8s.pod.network.rx, k8s.pod.ip, metricset, _tsid] - # TODO assert time_series_metric and time_series_dimension - - match: {fields.k8s\.pod\.uid.keyword.searchable: true} - - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.searchable: true} + - match: {fields.k8s\.pod\.uid.keyword.aggregatable: true} + - match: {fields.k8s\.pod\.uid.keyword.time_series_dimension: true} - is_false: fields.k8s\.pod\.uid.keyword.indices - is_false: fields.k8s\.pod\.uid.keyword.non_searchable_indices - is_false: fields.k8s\.pod\.uid.keyword.non_aggregatable_indices @@ -259,4 +310,15 @@ field capabilities: - is_false: fields.k8s\.pod\.ip.ip.indices - is_false: fields.k8s\.pod\.ip.ip.non_searchable_indices - is_false: fields.k8s\.pod\.ip.ip.non_aggregatable_indices - # TODO assert tsid once we build it: + - match: {fields.metricset.keyword.searchable: true} + - match: {fields.metricset.keyword.aggregatable: true} + - match: {fields.metricset.keyword.time_series_dimension: true} + - is_false: fields.metricset.keyword.indices + - is_false: fields.metricset.keyword.non_searchable_indices + - is_false: fields.metricset.keyword.non_aggregatable_indices + - match: {fields._tsid._tsid.metadata_field: true} + - match: {fields._tsid._tsid.searchable: false} + - match: {fields._tsid._tsid.aggregatable: true} + - is_false: fields._tsid._tsid.indices + - is_false: fields._tsid._tsid.non_searchable_indices + - is_false: fields._tsid._tsid.non_aggregatable_indices diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml index 5a187ce0b6430..f404213bb5113 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/50_alias.yml @@ -57,13 +57,40 @@ setup: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}}' -# TODO search on _tsid in an alias +--- +search an alias: + - skip: + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 + + - do: + indices.put_alias: + index: test + name: test_alias + + - do: + search: + index: test_alias + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} --- index into alias: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.put_alias: @@ -85,4 +112,23 @@ index into alias: - '{"@timestamp": "2021-04-28T18:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39", "ip": "10.10.55.4", "network": {"tx": 1434595272, "rx": 530605511}}}}' - match: {errors: false} - # TODO search on tsid once we generate it + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + + - match: {hits.total.value: 12} + - match: 
{aggregations.tsids.buckets.0.key: {k8s.pod.uid: 1c4fc7b8-93b7-4ba8-b609-2a48af2f8e39, metricset: pod}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - match: {aggregations.tsids.buckets.1.key: {k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - match: {aggregations.tsids.buckets.2.key: {k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9, metricset: pod}} + - match: {aggregations.tsids.buckets.2.doc_count: 4} + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml index ca4aa52e15a13..b17b1303b4245 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/60_add_dimensions.yml @@ -1,8 +1,8 @@ --- add dimensions with put_mapping: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -41,18 +41,18 @@ add dimensions with put_mapping: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO Fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -87,17 +87,17 @@ add dimensions to no dims with dynamic_template over index: index: test body: fields: - # TODO fetch the tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch the tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to no dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -132,17 +132,17 @@ add dimensions to no dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over index: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -181,17 +181,17 @@ add dimensions to some dims with dynamic_template over index: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: "@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} --- add dimensions to some dims with dynamic_template over bulk: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -229,8 +229,8 @@ add dimensions to some dims with dynamic_template over bulk: index: test body: fields: - # TODO fetch tsid + - field: _tsid - field: 
"@timestamp" - match: {hits.total.value: 1} - # TODO fetch tsid + - match: {hits.hits.0.fields._tsid: [ { metricset: cat, other_dim: cat } ] } - match: {hits.hits.0.fields.@timestamp: ["2021-04-28T18:35:24.467Z"]} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml index 06eb087567238..aed895a97980c 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/70_dimension_types.yml @@ -1,8 +1,8 @@ keyword dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -43,14 +43,36 @@ keyword dimension: - '{"@timestamp": "2021-04-28T18:35:54.467Z", "uid": "df3145b3-0563-4d3b-a0f7-897eb2876ea9", "voltage": 3.3}' - is_false: errors - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {uid: 947e4ced-1786-4e53-9e0c-5c447e959507}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9}} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- long dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -93,14 +115,36 @@ long dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "id": 2, "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: {id: 1, metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: {id: 2, metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} --- ip dimension: - skip: features: close_to - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.create: @@ -143,4 +187,26 @@ ip dimension: - '{"index": {}}' - '{"@timestamp": "2021-04-28T18:35:54.467Z", "metricset": "aa", "ip": "2001:0db8:85a3::8a2e:0370:7334", "voltage": 3.3}' - # TODO aggregate on tsid + - do: + search: + index: test + body: + size: 0 + aggs: + tsids: + terms: + field: _tsid + order: + _key: asc + aggs: + voltage: + avg: + field: voltage + + - match: {hits.total.value: 8} + - match: {aggregations.tsids.buckets.0.key: { ip: "10.10.1.1", metricset: aa}} + - match: {aggregations.tsids.buckets.0.doc_count: 4} + - close_to: {aggregations.tsids.buckets.0.voltage.value: { value: 7.3, error: 0.01 }} + - match: {aggregations.tsids.buckets.1.key: { ip: 
"2001:db8:85a3::8a2e:370:7334", metricset: aa }} + - match: {aggregations.tsids.buckets.1.doc_count: 4} + - close_to: {aggregations.tsids.buckets.1.voltage.value: { value: 3.3, error: 0.01 }} diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml index 8dd33551912a4..2584fdf4dd2db 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/tsdb/80_index_resize.yml @@ -100,19 +100,19 @@ split: index: test_split body: fields: - # TODO fetch tsid + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- shrink: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.shrink: @@ -126,19 +126,20 @@ shrink: search: index: test_shrink body: - # TODO test fetching tsid + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} --- clone: - skip: - version: " - 7.99.99" - reason: introduced in 8.0.0 + version: " - 8.00.99" + reason: _tsid support introduced in 8.1.0 - do: indices.clone: @@ -149,10 +150,11 @@ clone: search: index: test_clone body: - # TODO test fetching tsid + fields: + - field: _tsid query: query_string: query: '+@timestamp:"2021-04-28T18:51:04.467Z" +k8s.pod.name:cat' - match: {hits.total.value: 1} - # TODO test fetching tsid + - match: {hits.hits.0.fields._tsid: [{k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507, metricset: pod}]} diff --git a/server/src/main/java/org/elasticsearch/index/IndexMode.java b/server/src/main/java/org/elasticsearch/index/IndexMode.java index 0107a1ad817d8..ec0e689dfb238 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexMode.java +++ b/server/src/main/java/org/elasticsearch/index/IndexMode.java @@ -17,7 +17,9 @@ import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MappingLookup; +import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import java.io.IOException; import java.util.Collections; @@ -67,6 +69,13 @@ public void validateTimestampFieldMapping(boolean isDataStream, MappingLookup ma public Map getDefaultMapping() { return Collections.emptyMap(); } + + @Override + public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { + // non time-series indices must not have a TimeSeriesIdFieldMapper + return null; + } + }, TIME_SERIES { @Override @@ -124,6 +133,11 @@ private String routingRequiredBad() { private String tsdbMode() { return "[" + IndexSettings.MODE.getKey() + "=time_series]"; } + + @Override + public MetadataFieldMapper buildTimeSeriesIdFieldMapper() { + return TimeSeriesIdFieldMapper.INSTANCE; + } }; public static final Map DEFAULT_TIME_SERIES_TIMESTAMP_MAPPING = Map.of( @@ -177,4 +191,11 @@ private String tsdbMode() { * @return */ public abstract Map 
getDefaultMapping(); + + /** + * Return an instance of the {@link TimeSeriesIdFieldMapper} that generates + * the _tsid field. The field mapper will be added to the list of the metadata + * field mappers for the index. + */ + public abstract MetadataFieldMapper buildTimeSeriesIdFieldMapper(); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java index a9591dd76279f..a95b63321171f 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSortConfig.java @@ -18,7 +18,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.sort.SortOrder; @@ -127,11 +129,21 @@ private static MultiValueMode parseMultiValueMode(String value) { final FieldSortSpec[] sortSpecs; private final Version indexCreatedVersion; private final String indexName; + private final IndexMode indexMode; public IndexSortConfig(IndexSettings indexSettings) { final Settings settings = indexSettings.getSettings(); this.indexCreatedVersion = indexSettings.getIndexVersionCreated(); this.indexName = indexSettings.getIndex().getName(); + this.indexMode = indexSettings.getMode(); + + if (this.indexMode == IndexMode.TIME_SERIES) { + this.sortSpecs = new FieldSortSpec[] { + new FieldSortSpec(TimeSeriesIdFieldMapper.NAME), + new FieldSortSpec(DataStreamTimestampFieldMapper.DEFAULT_PATH) }; + return; + } + List fields = INDEX_SORT_FIELD_SETTING.get(settings); this.sortSpecs = fields.stream().map((name) -> new FieldSortSpec(name)).toArray(FieldSortSpec[]::new); @@ -198,7 +210,11 @@ public Sort buildIndexSort( FieldSortSpec sortSpec = sortSpecs[i]; final MappedFieldType ft = fieldTypeLookup.apply(sortSpec.field); if (ft == null) { - throw new IllegalArgumentException("unknown index sort field:[" + sortSpec.field + "]"); + String err = "unknown index sort field:[" + sortSpec.field + "]"; + if (this.indexMode == IndexMode.TIME_SERIES) { + err += " required by [" + IndexSettings.MODE.getKey() + "=time_series]"; + } + throw new IllegalArgumentException(err); } if (Objects.equals(ft.name(), sortSpec.field) == false) { if (this.indexCreatedVersion.onOrAfter(Version.V_7_13_0)) {
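At the Lucene level, the hard-wired specs above amount to a two-key index sort: _tsid ordinals first, then the @timestamp value. A minimal sketch of the equivalent Sort follows, assuming both keys default to ascending as the unadorned FieldSortSpecs imply; the wiring is illustrative, not the actual buildIndexSort code path.

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.SortedSetSortField;

public final class TimeSeriesSortExample {

    // Ascending _tsid (a doc-values ordinal sort), then ascending @timestamp.
    public static Sort timeSeriesSort() {
        SortField tsid = new SortedSetSortField("_tsid", false);
        SortField timestamp = new SortedNumericSortField("@timestamp", SortField.Type.LONG, false);
        return new Sort(tsid, timestamp);
    }
}

Passing timeSeriesSort() to IndexWriterConfig#setIndexSort would apply it at write time; sorting by _tsid first keeps all documents of one time series adjacent on disk, which is what makes per-series scans and compression effective.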
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index 1b30adbae695d..b1497c8e988dc 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -19,9 +19,11 @@ import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -497,18 +499,17 @@ private static InetAddress value(XContentParser parser, InetAddress nullValue) t } private void indexValue(DocumentParserContext context, InetAddress address) { + if (dimension) { + // Encode the tsid part of the dimension field if the _tsid field is enabled. + // If the _tsid field is not enabled, we can skip the encoding part. + BytesReference bytes = context.getMetadataMapper(TimeSeriesIdFieldMapper.NAME) != null + ? TimeSeriesIdFieldMapper.encodeTsidValue(NetworkAddress.format(address)) + : null; + context.doc().addDimensionBytes(fieldType().name(), bytes); + } if (indexed) { Field field = new InetAddressPoint(fieldType().name(), address); - if (dimension) { - // Add dimension field with key so that we ensure it is single-valued. - // Dimension fields are always indexed. - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + fieldType().name() + "] cannot be a multi-valued field."); - } - context.doc().addWithKey(fieldType().name(), field); - } else { - context.doc().add(field); - } + context.doc().add(field); } if (hasDocValues) { context.doc().add(new SortedSetDocValuesField(fieldType().name(), new BytesRef(InetAddressPoint.encode(address)))); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index e946e4fc800f4..dd5c412dfabe5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -29,6 +29,7 @@ import org.apache.lucene.util.automaton.CompiledAutomaton.AUTOMATON_TYPE; import org.apache.lucene.util.automaton.MinimizationOperations; import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.AutomatonQueries; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -69,6 +70,8 @@ public static class Defaults { FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); FIELD_TYPE.freeze(); } + + public static final int IGNORE_ABOVE = Integer.MAX_VALUE; } public static class KeywordField extends Field { @@ -102,7 +105,7 @@ public static class Builder extends FieldMapper.Builder { "ignore_above", true, m -> toType(m).ignoreAbove, - Integer.MAX_VALUE + Defaults.IGNORE_ABOVE ); private final Parameter indexOptions = Parameter.restrictedStringParam( @@ -503,9 +506,6 @@ public void validateMatchedRoutingPath() { } } - /** The maximum keyword length allowed for a dimension field */ - private static final int DIMENSION_MAX_BYTES = 1024; - private final boolean indexed; private final boolean hasDocValues; private final String nullValue; @@ -587,7 +587,6 @@ protected void indexScriptValues( } private void indexValue(DocumentParserContext context, String value) { - if (value == null) { return; } @@ -598,27 +597,20 @@ private void indexValue(DocumentParserContext context, String value) { } value = normalizeValue(fieldType().normalizer(), name(), value); + if (dimension) { + // Encode the tsid part of the dimension field. 
Although, it would seem reasonable + // to skip the encode part if we don't generate a _tsid field (as we do with number + // and ip fields), we keep this test because we must ensure that the value of this + // dimension field is not larger than TimeSeriesIdFieldMapper.DIMENSION_VALUE_LIMIT + BytesReference bytes = TimeSeriesIdFieldMapper.encodeTsidValue(value); + context.doc().addDimensionBytes(fieldType().name(), bytes); + } // convert to utf8 only once before feeding postings/dv/stored fields final BytesRef binaryValue = new BytesRef(value); - if (dimension && binaryValue.length > DIMENSION_MAX_BYTES) { - throw new IllegalArgumentException( - "Dimension field [" + fieldType().name() + "] cannot be more than [" + DIMENSION_MAX_BYTES + "] bytes long." - ); - } if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) { Field field = new KeywordField(fieldType().name(), binaryValue, fieldType); - if (dimension) { - // Check that a dimension field is single-valued and not an array - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + fieldType().name() + "] cannot be a multi-valued field."); - } - // Add dimension field with key so that we ensure it is single-valued. - // Dimension fields are always indexed. - context.doc().addWithKey(fieldType().name(), field); - } else { - context.doc().add(field); - } + context.doc().add(field); if (fieldType().hasDocValues() == false && fieldType.omitNorms()) { context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java b/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java index 22b5d8bfc8ffa..3cb2b030ebeff 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LuceneDocument.java @@ -10,12 +10,16 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; /** * Fork of {@link org.apache.lucene.document.Document} with additional functionality. @@ -27,6 +31,12 @@ public class LuceneDocument implements Iterable { private final String prefix; private final List fields; private Map keyedFields; + /** + * A sorted map of the serialized values of dimension fields that will be used + * for generating the _tsid field. The map will be used by {@link TimeSeriesIdFieldMapper} + * to build the _tsid field for the document. + */ + private SortedMap dimensionBytes; LuceneDocument(String path, LuceneDocument parent) { fields = new ArrayList<>(); @@ -99,6 +109,27 @@ public IndexableField getByKey(Object key) { return keyedFields == null ? null : keyedFields.get(key); } + /** + * Add the serialized byte reference for a dimension field. This will be used by {@link TimeSeriesIdFieldMapper} + * to build the _tsid field for the document. + */ + public void addDimensionBytes(String fieldName, BytesReference tsidBytes) { + if (dimensionBytes == null) { + // It is a {@link TreeMap} so that it is order by field name. 
+ dimensionBytes = new TreeMap<>(); + } else if (dimensionBytes.containsKey(fieldName)) { + throw new IllegalArgumentException("Dimension field [" + fieldName + "] cannot be a multi-valued field."); + } + dimensionBytes.put(fieldName, tsidBytes); + } + + public SortedMap getDimensionBytes() { + if (dimensionBytes == null) { + return Collections.emptySortedMap(); + } + return dimensionBytes; + } + public IndexableField[] getFields(String name) { List f = new ArrayList<>(); for (IndexableField field : fields) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index e46193f9b237e..760ed2427cb44 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -206,9 +206,14 @@ Map, MetadataFieldMapper> getMetadataMapper if (existingMapper == null) { for (MetadataFieldMapper.TypeParser parser : metadataMapperParsers.values()) { MetadataFieldMapper metadataFieldMapper = parser.getDefault(parserContext()); - metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); + // A MetadataFieldMapper may choose to not be added to the metadata mappers + // of an index (eg TimeSeriesIdFieldMapper is only added to time series indices) + // In this case its TypeParser will return null instead of the MetadataFieldMapper + // instance. + if (metadataFieldMapper != null) { + metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); + } } - } else { metadataMappers.putAll(existingMapper.mapping().getMetadataMappersMap()); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index a4bfe18814b7e..b8253c76ceac2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -1354,7 +1355,7 @@ protected String contentType() { protected void parseCreateField(DocumentParserContext context) throws IOException { Number value; try { - value = value(context.parser(), type, nullValue, coerce.value()); + value = value(context.parser(), type, nullValue, coerce()); } catch (InputCoercionException | IllegalArgumentException | JsonParseException e) { if (ignoreMalformed.value() && context.parser().currentToken().isValue()) { context.addIgnoredField(mappedFieldType.name()); @@ -1391,20 +1392,18 @@ private static Number value(XContentParser parser, NumberType numberType, Number } private void indexValue(DocumentParserContext context, Number numericValue) { - List fields = fieldType().type.createFields(fieldType().name(), numericValue, indexed, hasDocValues, stored); - if (dimension) { - // Check that a dimension field is single-valued and not an array - if (context.doc().getByKey(fieldType().name()) != null) { - throw new IllegalArgumentException("Dimension field [" + fieldType().name() + "] cannot be a multi-valued field."); - } - if (fields.size() > 0) { - // Add the first field by key so that we can validate if it has 
been added - context.doc().addWithKey(fieldType().name(), fields.get(0)); - context.doc().addAll(fields.subList(1, fields.size())); - } - } else { - context.doc().addAll(fields); + if (dimension && numericValue != null) { + // Dimension can only be one of byte, short, int, long. So, we encode the tsid + // part of the dimension field by using the long value. + // Also, there is no point in encoding the tsid value if we do not generate + // the _tsid field. + BytesReference bytes = context.getMetadataMapper(TimeSeriesIdFieldMapper.NAME) != null + ? TimeSeriesIdFieldMapper.encodeTsidValue(numericValue.longValue()) + : null; + context.doc().addDimensionBytes(fieldType().name(), bytes); } + List fields = fieldType().type.createFields(fieldType().name(), numericValue, indexed, hasDocValues, stored); + context.doc().addAll(fields); if (hasDocValues == false && (stored || indexed)) { context.addToFieldNames(fieldType().name()); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java new file mode 100644 index 0000000000000..876b43d3cffc0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapper.java @@ -0,0 +1,226 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.SortedSetDocValuesField; +import org.apache.lucene.search.Query; +import org.apache.lucene.util.ByteBlockPool; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.query.SearchExecutionContext; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.function.Supplier; + +/** + * Mapper for {@code _tsid} field included generated when the index is + * {@link IndexMode#TIME_SERIES organized into time series}. + */ +public class TimeSeriesIdFieldMapper extends MetadataFieldMapper { + + public static final String NAME = "_tsid"; + public static final String CONTENT_TYPE = "_tsid"; + public static final TimeSeriesIdFieldType FIELD_TYPE = new TimeSeriesIdFieldType(); + public static final TimeSeriesIdFieldMapper INSTANCE = new TimeSeriesIdFieldMapper(); + + /** + * The maximum length of the tsid. The value itself comes from a range check in + * Lucene's writer for utf-8 doc values. + */ + private static final int LIMIT = ByteBlockPool.BYTE_BLOCK_SIZE - 2; + /** + * Maximum length of the name of dimension. We picked this so that we could + * comfortable fit 16 dimensions inside {@link #LIMIT}. 
+     */
+    private static final int DIMENSION_NAME_LIMIT = 512;
+    /**
+     * The maximum length of any single dimension. We picked this so that we could
+     * comfortably fit 16 dimensions inside {@link #LIMIT}. This should be quite
+     * comfortable given that dimensions are typically going to be less than a
+     * hundred bytes each, but we're being paranoid here.
+     */
+    private static final int DIMENSION_VALUE_LIMIT = 1024;
+
+    @Override
+    public FieldMapper.Builder getMergeBuilder() {
+        return new Builder().init(this);
+    }
+
+    public static class Builder extends MetadataFieldMapper.Builder {
+        protected Builder() {
+            super(NAME);
+        }
+
+        @Override
+        protected List<Parameter<?>> getParameters() {
+            return List.of();
+        }
+
+        @Override
+        public TimeSeriesIdFieldMapper build() {
+            return INSTANCE;
+        }
+    }
+
+    public static final TypeParser PARSER = new FixedTypeParser(c -> c.getIndexSettings().getMode().buildTimeSeriesIdFieldMapper());
+
+    public static final class TimeSeriesIdFieldType extends MappedFieldType {
+        private TimeSeriesIdFieldType() {
+            super(NAME, false, false, true, TextSearchInfo.NONE, Collections.emptyMap());
+        }
+
+        @Override
+        public String typeName() {
+            return CONTENT_TYPE;
+        }
+
+        @Override
+        public ValueFetcher valueFetcher(SearchExecutionContext context, String format) {
+            return new DocValueFetcher(docValueFormat(format, null), context.getForField(this));
+        }
+
+        @Override
+        public DocValueFormat docValueFormat(String format, ZoneId timeZone) {
+            if (format != null) {
+                throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] doesn't support formats.");
+            }
+            return DocValueFormat.TIME_SERIES_ID;
+        }
+
+        @Override
+        public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier<SearchLookup> searchLookup) {
+            failIfNoDocValues();
+            // TODO don't leak the TSID's binary format into the script
+            return new SortedSetOrdinalsIndexFieldData.Builder(name(), CoreValuesSourceType.KEYWORD);
+        }
+
+        @Override
+        public Query termQuery(Object value, SearchExecutionContext context) {
+            throw new IllegalArgumentException("[" + NAME + "] is not searchable");
+        }
+    }
+
+    private TimeSeriesIdFieldMapper() {
+        super(FIELD_TYPE);
+    }
+
+    @Override
+    public void postParse(DocumentParserContext context) throws IOException {
+        assert fieldType().isSearchable() == false;
+
+        // SortedMap is expected to be sorted by key (field name)
+        SortedMap<String, BytesReference> dimensionFields = context.doc().getDimensionBytes();
+        if (dimensionFields.isEmpty()) {
+            throw new IllegalArgumentException("Dimension fields are missing.");
+        }
+
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.writeVInt(dimensionFields.size());
+            for (Map.Entry<String, BytesReference> entry : dimensionFields.entrySet()) {
+                String fieldName = entry.getKey();
+                BytesRef fieldNameBytes = new BytesRef(fieldName);
+                int len = fieldNameBytes.length;
+                if (len > DIMENSION_NAME_LIMIT) {
+                    throw new IllegalArgumentException(
+                        "Dimension name must be less than [" + DIMENSION_NAME_LIMIT + "] bytes but [" + fieldName + "] was [" + len + "]."
+                    );
+                }
+                // Write field name in utf-8 instead of writeString's utf-16-ish thing
+                out.writeBytesRef(fieldNameBytes);
+                entry.getValue().writeTo(out);
+            }
+
+            BytesReference timeSeriesId = out.bytes();
+            if (timeSeriesId.length() > LIMIT) {
+                throw new IllegalArgumentException(NAME + " longer than [" + LIMIT + "] bytes [" + timeSeriesId.length() + "].");
+            }
+            assert timeSeriesId != null : "In time series mode _tsid cannot be null";
+            context.doc().add(new SortedSetDocValuesField(fieldType().name(), timeSeriesId.toBytesRef()));
+        }
+    }
+
+    @Override
+    protected String contentType() {
+        return CONTENT_TYPE;
+    }
+
+    /**
+     * Decode the {@code _tsid} into a human readable map.
+     */
+    public static Map<String, Object> decodeTsid(StreamInput in) {
+        try {
+            int size = in.readVInt();
+            Map<String, Object> result = new LinkedHashMap<>(size);
+
+            for (int i = 0; i < size; i++) {
+                String name = in.readString();
+
+                int type = in.read();
+                switch (type) {
+                    case (byte) 's':
+                        result.put(name, in.readBytesRef().utf8ToString());
+                        break;
+                    case (byte) 'l':
+                        result.put(name, in.readLong());
+                        break;
+                    default:
+                        throw new IllegalArgumentException("Cannot parse [" + name + "]: Unknown type [" + type + "]");
+                }
+            }
+            return result;
+        } catch (IOException | IllegalArgumentException e) {
+            throw new IllegalArgumentException("Error formatting " + NAME + ": " + e.getMessage(), e);
+        }
+    }
+
+    static BytesReference encodeTsidValue(String value) {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            out.write((byte) 's');
+            /*
+             * Write in utf8 instead of StreamOutput#writeString which is utf-16-ish
+             * so it's easier for folks to reason about the space taken up. Mostly
+             * it'll be smaller too.
+             */
+            BytesRef bytes = new BytesRef(value);
+            if (bytes.length > DIMENSION_VALUE_LIMIT) {
+                throw new IllegalArgumentException(
+                    "Dimension fields must be less than [" + DIMENSION_VALUE_LIMIT + "] bytes but was [" + bytes.length + "]."
+ ); + } + out.writeBytesRef(bytes); + return out.bytes(); + } catch (IOException e) { + throw new IllegalArgumentException("Dimension field cannot be serialized.", e); + } + } + + static BytesReference encodeTsidValue(long value) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.write((byte) 'l'); + out.writeLong(value); + return out.bytes(); + } catch (IOException e) { + throw new IllegalArgumentException("Dimension field cannot be serialized.", e); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index d4393b2a5e563..917f261c219ea 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.flattened.FlattenedFieldMapper; import org.elasticsearch.index.seqno.RetentionLeaseBackgroundSyncAction; @@ -194,6 +195,7 @@ private static Map initBuiltInMetadataMa // (so will benefit from "fields: []" early termination builtInMetadataMappers.put(IdFieldMapper.NAME, IdFieldMapper.PARSER); builtInMetadataMappers.put(RoutingFieldMapper.NAME, RoutingFieldMapper.PARSER); + builtInMetadataMappers.put(TimeSeriesIdFieldMapper.NAME, TimeSeriesIdFieldMapper.PARSER); builtInMetadataMappers.put(IndexFieldMapper.NAME, IndexFieldMapper.PARSER); builtInMetadataMappers.put(SourceFieldMapper.NAME, SourceFieldMapper.PARSER); builtInMetadataMappers.put(NestedPathFieldMapper.NAME, NestedPathFieldMapper.PARSER); diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 0afb6c48752d0..dfad44a37e3d7 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,6 +21,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.geometry.utils.Geohash; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileUtils; import java.io.IOException; @@ -668,4 +670,31 @@ public double parseDouble(String value, boolean roundUp, LongSupplier now) { return Double.parseDouble(value); } }; + + DocValueFormat TIME_SERIES_ID = new TimeSeriesIdDocValueFormat(); + + /** + * DocValues format for time series id. 
+ */ + class TimeSeriesIdDocValueFormat implements DocValueFormat { + private TimeSeriesIdDocValueFormat() {} + + @Override + public String getWriteableName() { + return "tsid"; + } + + @Override + public void writeTo(StreamOutput out) {} + + @Override + public String toString() { + return "tsid"; + } + + @Override + public Object format(BytesRef value) { + return TimeSeriesIdFieldMapper.decodeTsid(new BytesArray(value).streamInput()); + } + }; } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index f7858e536edf9..22e5214785ba5 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -972,6 +972,7 @@ private void registerValueFormats() { registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW); registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY); registerValueFormat(DocValueFormat.UNSIGNED_LONG_SHIFTED.getWriteableName(), in -> DocValueFormat.UNSIGNED_LONG_SHIFTED); + registerValueFormat(DocValueFormat.TIME_SERIES_ID.getWriteableName(), in -> DocValueFormat.TIME_SERIES_ID); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java index b50aa2f7dc596..fe27738fe7589 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java @@ -85,6 +85,9 @@ public int compareKey(Bucket other) { @Override protected final XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + if (format == DocValueFormat.TIME_SERIES_ID) { + return builder.field(CommonFields.KEY.getPreferredName(), format.format(termBytes)); + } return builder.field(CommonFields.KEY.getPreferredName(), getKeyAsString()); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java index 9cd0087b511f9..3bd46a0f1ac07 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSortSettingsTests.java @@ -9,13 +9,17 @@ package org.elasticsearch.index; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Sort; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextSearchInfo; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -26,14 +30,18 @@ import org.elasticsearch.test.ESTestCase; import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.function.Supplier; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; +import static 
org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; public class IndexSortSettingsTests extends ESTestCase { + private static IndexSettings indexSettings(Settings settings) { return new IndexSettings(newIndexMeta("test", settings), Settings.EMPTY); } @@ -115,13 +123,8 @@ public void testInvalidMissing() { assertThat(exc.getMessage(), containsString("Illegal missing value:[default]," + " must be one of [_last, _first]")); } - public void testIndexSorting() { + public void testIndexSortingNoDocValues() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); MappedFieldType fieldType = new MappedFieldType("field", false, false, false, TextSearchInfo.NONE, Collections.emptyMap()) { @Override public String typeName() { @@ -144,13 +147,7 @@ public Query termQuery(Object value, SearchExecutionContext context) { throw new UnsupportedOperationException(); } }; - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> config.buildIndexSort( - field -> fieldType, - (ft, searchLookupSupplier) -> indexFieldDataService.getForField(ft, "index", searchLookupSupplier) - ) - ); + Exception iae = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, fieldType)); assertEquals("docvalues not found for index sort field:[field]", iae.getMessage()); assertThat(iae.getCause(), instanceOf(UnsupportedOperationException.class)); assertEquals("index sorting not supported on runtime field [field]", iae.getCause().getMessage()); @@ -158,16 +155,8 @@ public Query termQuery(Object value, SearchExecutionContext context) { public void testSortingAgainstAliases() { IndexSettings indexSettings = indexSettings(Settings.builder().put("index.sort.field", "field").build()); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - Exception e = expectThrows( - IllegalArgumentException.class, - () -> config.buildIndexSort(field -> mft, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)) - ); + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, Map.of("field", aliased))); assertEquals("Cannot use alias [field] as an index sort field", e.getMessage()); } @@ -175,17 +164,54 @@ public void testSortingAgainstAliasesPre713() { IndexSettings indexSettings = indexSettings( Settings.builder().put("index.version.created", Version.V_7_12_0).put("index.sort.field", "field").build() ); - IndexSortConfig config = indexSettings.getIndexSortConfig(); - assertTrue(config.hasIndexSort()); - 
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null); - NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - final IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService); - MappedFieldType mft = new KeywordFieldMapper.KeywordFieldType("aliased"); - config.buildIndexSort(field -> mft, (ft, s) -> indexFieldDataService.getForField(ft, "index", s)); - + MappedFieldType aliased = new KeywordFieldMapper.KeywordFieldType("aliased"); + Sort sort = buildIndexSort(indexSettings, Map.of("field", aliased)); + assertThat(sort.getSort(), arrayWithSize(1)); + assertThat(sort.getSort()[0].getField(), equalTo("aliased")); assertWarnings( "Index sort for index [test] defined on field [field] which resolves to field [aliased]. " + "You will not be able to define an index sort over aliased fields in new indexes" ); } + + public void testTimeSeriesMode() { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "some_dimension") + .build() + ); + Sort sort = buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE, new DateFieldMapper.DateFieldType("@timestamp")); + assertThat(sort.getSort(), arrayWithSize(2)); + assertThat(sort.getSort()[0].getField(), equalTo("_tsid")); + assertThat(sort.getSort()[1].getField(), equalTo("@timestamp")); + } + + public void testTimeSeriesModeNoTimestamp() { + IndexSettings indexSettings = indexSettings( + Settings.builder() + .put(IndexSettings.MODE.getKey(), "time_series") + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), "some_dimension") + .build() + ); + Exception e = expectThrows(IllegalArgumentException.class, () -> buildIndexSort(indexSettings, TimeSeriesIdFieldMapper.FIELD_TYPE)); + assertThat(e.getMessage(), equalTo("unknown index sort field:[@timestamp] required by [index.mode=time_series]")); + } + + private Sort buildIndexSort(IndexSettings indexSettings, MappedFieldType... 
mfts) {
+        Map<String, MappedFieldType> lookup = new HashMap<>(mfts.length);
+        for (MappedFieldType mft : mfts) {
+            assertNull(lookup.put(mft.name(), mft));
+        }
+        return buildIndexSort(indexSettings, lookup);
+    }
+
+    private Sort buildIndexSort(IndexSettings indexSettings, Map<String, MappedFieldType> lookup) {
+        IndexSortConfig config = indexSettings.getIndexSortConfig();
+        assertTrue(config.hasIndexSort());
+        IndicesFieldDataCache cache = new IndicesFieldDataCache(indexSettings.getSettings(), null);
+        NoneCircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
+        IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, cache, circuitBreakerService);
+        return config.buildIndexSort(lookup::get, (ft, s) -> indexFieldDataService.getForField(ft, "index", s));
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
index e05732c4042a6..4ff57354123bd 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldFilterMapperPluginTests.java
@@ -29,6 +29,7 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -113,8 +114,7 @@ public void testFieldCapabilities() {

     private static void assertFieldCaps(FieldCapabilitiesResponse fieldCapabilitiesResponse, Collection<String> expectedFields) {
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>(fieldCapabilitiesResponse.get());
-        Set<String> builtInMetadataFields = IndicesModule.getBuiltInMetadataFields();
-        for (String field : builtInMetadataFields) {
+        for (String field : builtInMetadataFields()) {
             Map<String, FieldCapabilities> remove = responseMap.remove(field);
             assertNotNull(" expected field [" + field + "] not found", remove);
         }
@@ -125,13 +125,19 @@ private static void assertFieldCaps(FieldCapabilitiesResponse fieldCapabilitiesR
         assertEquals("Some unexpected fields were returned: " + responseMap.keySet(), 0, responseMap.size());
     }

+    private static Set<String> builtInMetadataFields() {
+        Set<String> builtInMetadataFields = new HashSet<>(IndicesModule.getBuiltInMetadataFields());
+        // Index is not a time-series index, and it will not contain a _tsid field
+        builtInMetadataFields.remove(TimeSeriesIdFieldMapper.NAME);
+        return builtInMetadataFields;
+    }
+
     private static void assertFieldMappings(
         Map<String, GetFieldMappingsResponse.FieldMappingMetadata> actual,
         Collection<String> expectedFields
     ) {
-        Set<String> builtInMetadataFields = IndicesModule.getBuiltInMetadataFields();
         Map<String, GetFieldMappingsResponse.FieldMappingMetadata> fields = new HashMap<>(actual);
-        for (String field : builtInMetadataFields) {
+        for (String field : builtInMetadataFields()) {
             GetFieldMappingsResponse.FieldMappingMetadata fieldMappingMetadata = fields.remove(field);
             assertNotNull(" expected field [" + field + "] not found", fieldMappingMetadata);
         }
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
index 63e73c36b3499..63b2ceb0c7925 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/KeywordFieldMapperTests.java
@@ -387,7 +387,7 @@ public void testDimensionExtraLongKeyword() throws IOException {
             MapperParsingException.class,
             () -> mapper.parse(source(b -> b.field("field", randomAlphaOfLengthBetween(1025, 2048))))
         );
-        assertThat(e.getCause().getMessage(), containsString("Dimension field [field] cannot be more than [1024] bytes long."));
+        assertThat(e.getCause().getMessage(), containsString("Dimension fields must be less than [1024] bytes but was"));
     }

     public void testConfigureSimilarity() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java
index 1cad1c7d50ae8..32caede9f2189 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java
@@ -50,10 +50,11 @@ private static MappingParser createMappingParser(Settings settings) {
             indexSettings.getIndexVersionCreated()
         );
         Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> metadataMappers = new LinkedHashMap<>();
-        metadataMapperParsers.values()
-            .stream()
-            .map(parser -> parser.getDefault(parserContextSupplier.get()))
-            .forEach(m -> metadataMappers.put(m.getClass(), m));
+        metadataMapperParsers.values().stream().map(parser -> parser.getDefault(parserContextSupplier.get())).forEach(m -> {
+            if (m != null) {
+                metadataMappers.put(m.getClass(), m);
+            }
+        });
         return new MappingParser(
             parserContextSupplier,
             metadataMapperParsers,
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java
new file mode 100644
index 0000000000000..81acf8e6c74ef
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TimeSeriesIdFieldMapperTests.java
@@ -0,0 +1,573 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +import static io.github.nik9000.mapmatcher.MapMatcher.assertMap; +import static io.github.nik9000.mapmatcher.MapMatcher.matchesMap; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class TimeSeriesIdFieldMapperTests extends MetadataMapperTestCase { + + @Override + protected String fieldName() { + return TimeSeriesIdFieldMapper.NAME; + } + + @Override + protected void registerParameters(ParameterChecker checker) throws IOException { + // There aren't any parameters + } + + private DocumentMapper createDocumentMapper(String routingPath, XContentBuilder mappings) throws IOException { + return createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES.name()) + .put(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey(), 200) // Increase dimension limit + .put(IndexMetadata.INDEX_ROUTING_PATH.getKey(), routingPath) + .build(), + mappings + ).documentMapper(); + } + + private ParsedDocument parseDocument(DocumentMapper docMapper, CheckedFunction f) + throws IOException { + // Add the @timestamp field required by DataStreamTimestampFieldMapper for all time series indices + return docMapper.parse(source(b -> f.apply(b).field("@timestamp", "2021-10-01"))); + } + + public void testEnabledInTimeSeriesMode() throws Exception { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + ParsedDocument doc = parseDocument(docMapper, b -> b.field("a", "value").field("b", 100).field("c", 500)); + assertThat( + doc.rootDoc().getBinaryValue("_tsid"), + equalTo(new BytesRef("\u0002\u0001as\u0005value\u0001bl\u0000\u0000\u0000\u0000\u0000\u0000\u0000d")) + ); + assertThat(doc.rootDoc().getField("a").binaryValue(), equalTo(new BytesRef("value"))); + assertThat(doc.rootDoc().getField("b").numericValue(), equalTo(100L)); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "value").entry("b", 100L) + ); + } + + public void testDisabledInStandardMode() throws Exception { + DocumentMapper docMapper = createMapperService( + getIndexSettingsBuilder().put(IndexSettings.MODE.getKey(), IndexMode.STANDARD.name()).build(), + mapping(b -> {}) + ).documentMapper(); + assertThat(docMapper.metadataMapper(TimeSeriesIdFieldMapper.class), is(nullValue())); + + ParsedDocument doc = docMapper.parse(source(b -> b.field("field", "value"))); + assertThat(doc.rootDoc().getBinaryValue("_tsid"), is(nullValue())); + assertThat(doc.rootDoc().get("field"), equalTo("value")); + } + + public void testIncludeInDocumentNotAllowed() throws Exception { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", 
"keyword").field("time_series_dimension", true).endObject(); }) + ); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("_tsid", "foo"))); + + assertThat(e.getCause().getMessage(), containsString("Field [_tsid] is a metadata field and cannot be added inside a document")); + } + + /** + * Test with non-randomized string for sanity checking. + */ + public void testStrings() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "keyword") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", "foo").field("b", "bar").field("c", "baz").startObject("o").field("e", "bort").endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "foo").entry("o.e", "bort") + ); + } + + public void testKeywordTooLong() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + Exception e = expectThrows( + MapperParsingException.class, + () -> parseDocument(docMapper, b -> b.field("a", "more_than_1024_bytes".repeat(52)).field("@timestamp", "2021-10-01")) + ); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1040].")); + } + + public void testKeywordTooLongUtf8() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + String theWordLong = "長い"; + Exception e = expectThrows( + MapperParsingException.class, + () -> parseDocument(docMapper, b -> b.field("a", theWordLong.repeat(200)).field("@timestamp", "2021-10-01")) + ); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1200].")); + } + + public void testKeywordNull() throws IOException { + DocumentMapper docMapper = createDocumentMapper( + "a", + mapping(b -> { b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); }) + ); + + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", (String) null))); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } + + /** + * Test with non-randomized longs for sanity checking. 
+ */ + public void testLong() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "long") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", 1234).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", 1234L) + ); + } + + public void testLongInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_long"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [long] in document with id '1'. Preview of field's value: 'not_a_long'") + ); + } + + public void testLongNull() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "long").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", (Long) null))); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } + + /** + * Test with non-randomized integers for sanity checking. + */ + public void testInteger() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "integer") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", Integer.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Integer.MIN_VALUE) + ); + } + + public void testIntegerInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_an_int"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [integer] in document with id '1'. 
Preview of field's value: 'not_an_int'") + ); + } + + public void testIntegerOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo( + "failed to parse field [a] of type [integer] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'" + ) + ); + } + + /** + * Test with non-randomized shorts for sanity checking. + */ + public void testShort() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "short") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", Short.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Short.MIN_VALUE) + ); + } + + public void testShortInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_short"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [short] in document with id '1'. Preview of field's value: 'not_a_short'") + ); + } + + public void testShortOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "short").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [short] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'") + ); + } + + /** + * Test with non-randomized shorts for sanity checking. 
+ */ + public void testByte() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "byte") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", 1L).field("b", -1).field("c", "baz").startObject("o").field("e", (int) Byte.MIN_VALUE).endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", 1L).entry("o.e", (long) Byte.MIN_VALUE) + ); + } + + public void testByteInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_a_byte"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [byte] in document with id '1'. Preview of field's value: 'not_a_byte'") + ); + } + + public void testByteOutOfRange() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "byte").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", Long.MAX_VALUE))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [byte] in document with id '1'. Preview of field's value: '" + Long.MAX_VALUE + "'") + ); + } + + /** + * Test with non-randomized ips for sanity checking. 
+ */ + public void testIp() throws IOException { + DocumentMapper docMapper = createDocumentMapper("kw", mapping(b -> { + b.startObject("kw").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("a").field("type", "ip").field("time_series_dimension", true).endObject(); + b.startObject("o") + .startObject("properties") + .startObject("e") + .field("type", "ip") + .field("time_series_dimension", true) + .endObject() + .endObject() + .endObject(); + })); + + ParsedDocument doc = parseDocument( + docMapper, + b -> b.field("a", "192.168.0.1").field("b", -1).field("c", "baz").startObject("o").field("e", "255.255.255.1").endObject() + ); + assertMap( + TimeSeriesIdFieldMapper.decodeTsid(new ByteArrayStreamInput(doc.rootDoc().getBinaryValue("_tsid").bytes)), + matchesMap().entry("a", "192.168.0.1").entry("o.e", "255.255.255.1") + ); + } + + public void testIpInvalidString() throws IOException { + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("a").field("type", "ip").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + })); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> b.field("a", "not_an_ip"))); + assertThat( + e.getMessage(), + equalTo("failed to parse field [a] of type [ip] in document with id '1'. Preview of field's value: 'not_an_ip'") + ); + } + + /** + * Tests when the total of the tsid is more than 32k. + */ + public void testVeryLarge() throws IOException { + // By default, only 16 dimension fields are allowed. To support 100 dimension fields + // we must increase 'index.mapping.dimension_fields.limit' + DocumentMapper docMapper = createDocumentMapper("b", mapping(b -> { + b.startObject("b").field("type", "keyword").field("time_series_dimension", true).endObject(); + for (int i = 0; i < 100; i++) { + b.startObject("d" + i).field("type", "keyword").field("time_series_dimension", true).endObject(); + } + })); + + String large = "many words ".repeat(80); + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, b -> { + for (int i = 0; i < 100; i++) { + b.field("d" + i, large); + } + return b; + })); + assertThat(e.getCause().getMessage(), equalTo("_tsid longer than [32766] bytes [88691].")); + } + + /** + * Sending the same document twice produces the same value. + */ + public void testSameGenConsistentForSameDoc() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + CheckedFunction fields = d -> d.field("a", a).field("b", b).field("c", (long) c); + ParsedDocument doc1 = parseDocument(docMapper, fields); + ParsedDocument doc2 = parseDocument(docMapper, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Non-dimension fields don't influence the value of _tsid. 
+ */ + public void testExtraFieldsDoNotMatter() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + ParsedDocument doc1 = parseDocument( + docMapper, + d -> d.field("a", a).field("b", b).field("c", (long) c).field("e", between(10, 100)) + ); + ParsedDocument doc2 = parseDocument( + docMapper, + d -> d.field("a", a).field("b", b).field("c", (long) c).field("e", between(50, 200)) + ); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * The order that the dimensions appear in the document do not influence the value of _tsid. + */ + public void testOrderDoesNotMatter() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(0, 2); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", b).field("c", (long) c)); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("b", b).field("a", a).field("c", (long) c)); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Dimensions that appear in the mapping but not in the document don't influence the value of _tsid. + */ + public void testUnusedExtraDimensions() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "long").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + CheckedFunction fields = d -> d.field("a", a).field("b", b); + ParsedDocument doc1 = parseDocument(docMapper, fields); + ParsedDocument doc2 = parseDocument(docMapper, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, equalTo(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Different values for dimensions change the result. 
+ */ + public void testDifferentValues() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", between(1, 100))); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("a", a + 1).field("b", between(200, 300))); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Two documents with the same *values* but different dimension keys will generate + * different {@code _tsid}s. + */ + public void testDifferentDimensions() throws IOException { + // First doc mapper has dimension fields a and b + DocumentMapper docMapper1 = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + // Second doc mapper has dimension fields a and c + DocumentMapper docMapper2 = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(5, 500); + CheckedFunction fields = d -> d.field("a", a).field("b", b).field("c", c); + ParsedDocument doc1 = parseDocument(docMapper1, fields); + ParsedDocument doc2 = parseDocument(docMapper2, fields); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + /** + * Documents with fewer dimensions have a different value. 
+ */ + public void testFewerDimensions() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + String a = randomAlphaOfLength(10); + int b = between(1, 100); + int c = between(5, 500); + ParsedDocument doc1 = parseDocument(docMapper, d -> d.field("a", a).field("b", b)); + ParsedDocument doc2 = parseDocument(docMapper, d -> d.field("a", a).field("b", b).field("c", c)); + assertThat(doc1.rootDoc().getBinaryValue("_tsid").bytes, not(doc2.rootDoc().getBinaryValue("_tsid").bytes)); + } + + public void testEmpty() throws IOException { + DocumentMapper docMapper = createDocumentMapper("a", mapping(b -> { + b.startObject("a").field("type", "keyword").field("time_series_dimension", true).endObject(); + b.startObject("b").field("type", "integer").field("time_series_dimension", true).endObject(); + b.startObject("c").field("type", "integer").field("time_series_dimension", true).endObject(); + })); + + Exception e = expectThrows(MapperParsingException.class, () -> parseDocument(docMapper, d -> d)); + assertThat(e.getCause().getMessage(), equalTo("Dimension fields are missing.")); + } +} diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java index aa92d21591a35..7848cf942eb9d 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesModuleTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.TextFieldMapper; +import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; @@ -73,6 +74,7 @@ public Map getMetadataMappers() { IgnoredFieldMapper.NAME, IdFieldMapper.NAME, RoutingFieldMapper.NAME, + TimeSeriesIdFieldMapper.NAME, IndexFieldMapper.NAME, SourceFieldMapper.NAME, NestedPathFieldMapper.NAME, diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index fe5fd25dc5927..5db980f7a2cc8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -100,6 +100,10 @@ protected Settings getIndexSettings() { return SETTINGS; } + protected final Settings.Builder getIndexSettingsBuilder() { + return Settings.builder().put(getIndexSettings()); + } + protected IndexAnalyzers createIndexAnalyzers(IndexSettings indexSettings) { return createIndexAnalyzers(); } From 6d9aaf82409e0e7da7e393abf1a23a5c01b5e98f Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Mon, 29 Nov 2021 13:22:22 +0200 Subject: [PATCH 63/88] =?UTF-8?q?[ML]=20Improve=20error=20msg=20on=20start?= =?UTF-8?q?ing=20scrolling=20datafeed=20with=20no=20matchin=E2=80=A6=20(#8?= =?UTF-8?q?1069)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a scrolling 
datafeed has an index pattern that matches no indices, starting the
datafeed fails with a message about the time field having no mappings.

This commit improves this by informing the user of the actual cause of
the error, which is that no index matches the datafeed's indices.

Relates #81013
---
 .../scroll/ScrollDataExtractorFactory.java    | 10 +++++++
 .../extractor/DataExtractorFactoryTests.java  | 28 +++++++++++++++++++
 .../test/ml/start_stop_datafeed.yml           | 21 +++++++++++++-
 3 files changed, 58 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java
index dc117ba5ffe04..f343389afb978 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java
@@ -79,6 +79,16 @@ public static void create(

         // Step 2. Contruct the factory and notify listener
         ActionListener<FieldCapabilitiesResponse> fieldCapabilitiesHandler = ActionListener.wrap(fieldCapabilitiesResponse -> {
+            if (fieldCapabilitiesResponse.getIndices().length == 0) {
+                listener.onFailure(
+                    ExceptionsHelper.badRequestException(
+                        "datafeed [{}] cannot retrieve data because no index matches datafeed's indices {}",
+                        datafeed.getId(),
+                        datafeed.getIndices()
+                    )
+                );
+                return;
+            }
             TimeBasedExtractedFields extractedFields = TimeBasedExtractedFields.build(job, datafeed, fieldCapabilitiesResponse);
             listener.onResponse(
                 new ScrollDataExtractorFactory(client, datafeed, job, extractedFields, xContentRegistry, timingStatsReporter)
diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java
index 25abb69f70a37..e68121b8767e5 100644
--- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java
+++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java
@@ -81,6 +81,7 @@ public void setUpTests() {
         when(client.threadPool()).thenReturn(threadPool);
         when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
         fieldsCapabilities = mock(FieldCapabilitiesResponse.class);
+        when(fieldsCapabilities.getIndices()).thenReturn(new String[] { "test_index_1" });
         givenAggregatableField("time", "date");
         givenAggregatableField("field", "keyword");

@@ -100,6 +101,33 @@ public void setUpTests() {
         }).when(client).execute(same(GetRollupIndexCapsAction.INSTANCE), any(), any());
     }

+    public void testCreateDataExtractorFactoryGivenDefaultScrollAndNoMatchingIndices() {
+        when(fieldsCapabilities.getIndices()).thenReturn(new String[0]);
+
+        DataDescription.Builder dataDescription = new DataDescription.Builder();
+        dataDescription.setTimeField("time");
+        Job.Builder jobBuilder = DatafeedRunnerTests.createDatafeedJob();
+        jobBuilder.setDataDescription(dataDescription);
+        DatafeedConfig datafeedConfig = DatafeedRunnerTests.createDatafeedConfig("datafeed1", "foo").build();
+
+        ActionListener<DataExtractorFactory> listener = ActionListener.wrap(
+            dataExtractorFactory -> fail("factory creation should have failed as there are no matching indices"),
+            e -> assertThat(
+                e.getMessage(),
+                equalTo("datafeed [datafeed1] cannot retrieve data because no index " + "matches datafeed's indices [myIndex]")
+            )
+        );
+
+        DataExtractorFactory.create(
+            client,
+            datafeedConfig,
+            jobBuilder.build(new Date()),
+            xContentRegistry(),
+            timingStatsReporter,
+            listener
+        );
+    }
+
     public void testCreateDataExtractorFactoryGivenDefaultScroll() {
         DataDescription.Builder dataDescription = new DataDescription.Builder();
         dataDescription.setTimeField("time");
diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml
index ed9de0e09b57c..72dc65220d240 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/start_stop_datafeed.yml
@@ -177,7 +177,7 @@ setup:
       end: "2017-02-01T01:00:00Z"

 ---
-"Test start given datafeed index does not exist":
+"Test start datafeed given concrete index that does not exist":
   - do:
       ml.update_datafeed:
         datafeed_id: start-stop-datafeed-datafeed-1
@@ -195,6 +195,25 @@ setup:
       ml.start_datafeed:
         datafeed_id: "start-stop-datafeed-datafeed-1"

+---
+"Test start datafeed given index pattern with no matching indices":
+  - do:
+      ml.update_datafeed:
+        datafeed_id: start-stop-datafeed-datafeed-1
+        body: >
+          {
+            "indexes":["utopia*"]
+          }
+
+  - do:
+      ml.open_job:
+        job_id: "start-stop-datafeed-job"
+
+  - do:
+      catch: /datafeed \[start-stop-datafeed-datafeed-1] cannot retrieve data because no index matches datafeed's indices \[utopia\*\]/
+      ml.start_datafeed:
+        datafeed_id: "start-stop-datafeed-datafeed-1"
+
 ---
 "Test start given field without mappings":
   - do:

From d9e73eb441a6a3b1c1a62a4a49b6f7ce8e7d9f4e Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Mon, 29 Nov 2021 11:45:26 +0000
Subject: [PATCH 64/88] [ML] Mute ml/inference_crud/Test force delete given
 model referenced by pipeline (#81093)

Due to https://github.com/elastic/elasticsearch/issues/80703
---
 .../resources/rest-api-spec/test/ml/inference_crud.yml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml
index 8faf5de9df8d2..71a1ef09943e2 100644
--- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml
+++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/inference_crud.yml
@@ -589,6 +589,10 @@ setup:

 ---
 "Test force delete given model referenced by pipeline":
+  - skip:
+      version: all
+      reason: "@AwaitsFix https://github.com/elastic/elasticsearch/issues/80703"
+
   - do:
       ingest.put_pipeline:
         id: "pipeline-using-a-classification-model"

From e54438004504d543673613fb6bce5ef90dca9fb1 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Mon, 29 Nov 2021 14:19:34 +0200
Subject: =?UTF-8?q?[ML]=20Allow=20datafeed=20start=20with=20?=
 =?UTF-8?q?remote=20indices=20despite=20local=20index=20pat=E2=80=A6=20(#8?=
 =?UTF-8?q?1074)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When a datafeed is assigned we check that there are indices with
fully assigned shards. However, in the scenario where the datafeed
has a mix of local and remote index patterns, local index patterns
that match no index cause the datafeed assignment to fail. This
should not be the behaviour. As the datafeed also has remote indices
we should allow starting the datafeed.
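The fix described next amounts to the following guard. This is a condensed,
self-contained sketch with simplified names, not the production code; the
actual change to DatafeedNodeSelector is in the diff below, and the real
remote-index test uses RemoteClusterLicenseChecker.isRemoteIndex:

    import java.util.List;

    public class RemoteAwareIndexCheck {
        // A datafeed index is "remote" when it is qualified with a cluster
        // alias, e.g. "other_cluster:index-*".
        static boolean isRemoteIndex(String index) {
            return index.indexOf(':') >= 0;
        }

        // Fail assignment only when every pattern is local and none of them
        // resolved to a concrete index; remote patterns cannot be verified
        // from the local cluster, so their presence suppresses the failure.
        static boolean shouldFailAssignment(List<String> datafeedIndices, String[] concreteLocalIndices) {
            boolean hasRemoteIndices = datafeedIndices.stream().anyMatch(RemoteAwareIndexCheck::isRemoteIndex);
            return hasRemoteIndices == false && concreteLocalIndices.length == 0;
        }
    }

With indices ["missing-*", "remote:index-*"], shouldFailAssignment returns
false even though no local index matches, which is the behaviour the new
testLocalIndexPatternWithoutMatchingIndicesAndRemoteIndexPattern verifies.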
This commit fixes this by skipping the check that local index patterns produce matching indices when there are remote indices too. Closes #81013 --- .../ml/datafeed/DatafeedNodeSelector.java | 5 +++- .../datafeed/DatafeedNodeSelectorTests.java | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index e0785d3bda106..54bdabb64a3d8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -168,6 +168,7 @@ private AssignmentFailure checkAssignment() { @Nullable private AssignmentFailure verifyIndicesActive() { + boolean hasRemoteIndices = datafeedIndices.stream().anyMatch(RemoteClusterLicenseChecker::isRemoteIndex); String[] index = datafeedIndices.stream() // We cannot verify remote indices .filter(i -> RemoteClusterLicenseChecker.isRemoteIndex(i) == false) @@ -177,7 +178,9 @@ private AssignmentFailure verifyIndicesActive() { try { concreteIndices = resolver.concreteIndexNames(clusterState, indicesOptions, true, index); - if (concreteIndices.length == 0) { + + // If we have remote indices we cannot check those. We should not fail as they may contain data. + if (hasRemoteIndices == false && concreteIndices.length == 0) { return new AssignmentFailure( "cannot start datafeed [" + datafeedId diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index fe6bb9e34683c..c9898e50d997f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -390,6 +390,29 @@ public void testIndexPatternDoesntExist() { .checkDatafeedTaskCanBeCreated(); } + public void testLocalIndexPatternWithoutMatchingIndicesAndRemoteIndexPattern() { + Job job = createScheduledJob("job_id").build(new Date()); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Arrays.asList("missing-*", "remote:index-*")); + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); + tasks = tasksBuilder.build(); + + givenClusterState("foo", 1, 0); + + PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + clusterState, + resolver, + df.getId(), + df.getJobId(), + df.getIndices(), + SearchRequest.DEFAULT_INDICES_OPTIONS + ).selectNode(makeCandidateNodes("node_id", "other_node_id")); + assertEquals("node_id", result.getExecutorNode()); + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices(), SearchRequest.DEFAULT_INDICES_OPTIONS) + .checkDatafeedTaskCanBeCreated(); + } + public void testRemoteIndex() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")); From 92b6b6f1b28cc1c98ff7cc15733b4ef8bd26805d Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 13:08:02 +0000 Subject: [PATCH 66/88] [ML] Make inference timeout test more reliable (#81094) --- 
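Before the diff, a condensed sketch of the race-tolerant assertion pattern
this change adopts. The helper and exception type are illustrative
stand-ins, not part of the change; in the real test the operation is
infer(...) and the failure is a ResponseException carrying HTTP 408 (note
the patch also switches the server-side status from 429 TOO_MANY_REQUESTS
to 408 REQUEST_TIMEOUT, which is what the test now expects):

    import java.util.concurrent.Callable;

    public class RaceTolerantAssertion {
        // Runs an operation that races against a timeout. Winning the race
        // (a normal response) is acceptable; if the operation fails, the
        // failure must be the expected timeout status.
        static void assertSucceedsOrTimesOut(Callable<?> operation, int expectedTimeoutStatus) throws Exception {
            try {
                operation.call(); // may legitimately succeed even with a zero timeout
            } catch (StatusException ex) {
                if (ex.status != expectedTimeoutStatus) {
                    throw new AssertionError("expected status " + expectedTimeoutStatus + " but got " + ex.status);
                }
            }
        }

        // Minimal stand-in for a response exception carrying an HTTP status.
        static class StatusException extends Exception {
            final int status;
            StatusException(int status) {
                this.status = status;
            }
        }
    }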
.../xpack/ml/integration/PyTorchModelIT.java | 11 +++++++++-- .../ml/inference/deployment/DeploymentManager.java | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 573269671498b..776a94254aeb7 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -200,8 +200,15 @@ public void testEvaluateWithMinimalTimeout() throws IOException { putModelDefinition(modelId); putVocabulary(List.of("these", "are", "my", "words"), modelId); startDeployment(modelId); - ResponseException ex = expectThrows(ResponseException.class, () -> infer("my words", modelId, TimeValue.ZERO)); - assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(429)); + // There is a race between inference and timeout so that + // even with a zero timeout a valid inference response may + // be returned. + // The test asserts that if an error occurs it is a timeout error + try { + infer("my words", modelId, TimeValue.ZERO); + } catch (ResponseException ex) { + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(408)); + } stopDeployment(modelId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 2d3d07d06c81d..27ecbfbe5959b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -297,7 +297,7 @@ void onTimeout() { if (notified.compareAndSet(false, true)) { processContext.getResultProcessor().ignoreResposeWithoutNotifying(String.valueOf(requestId)); listener.onFailure( - new ElasticsearchStatusException("timeout [{}] waiting for inference result", RestStatus.TOO_MANY_REQUESTS, timeout) + new ElasticsearchStatusException("timeout [{}] waiting for inference result", RestStatus.REQUEST_TIMEOUT, timeout) ); return; } From 31011184408a1c3799635c902307f10a10477191 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 14:03:22 +0000 Subject: [PATCH 67/88] [ML] Fix incorrect logging of unexpected model size error (#81089) --- .../pytorch/process/PyTorchStateStreamer.java | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java index a60c681ca2b02..562361bffff51 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchStateStreamer.java @@ -37,12 +37,16 @@ public class PyTorchStateStreamer { private static final Logger logger = LogManager.getLogger(PyTorchStateStreamer.class); + /** The size of the data written before the model definition */ + private static final int 
NUM_BYTES_IN_PRELUDE = 4; + private final OriginSettingClient client; private final ExecutorService executorService; private final NamedXContentRegistry xContentRegistry; private volatile boolean isCancelled; private volatile int modelSize = -1; - private final AtomicInteger bytesWritten = new AtomicInteger(); + // model bytes only, does not include the prelude + private final AtomicInteger modelBytesWritten = new AtomicInteger(); public PyTorchStateStreamer(Client client, ExecutorService executorService, NamedXContentRegistry xContentRegistry) { this.client = new OriginSettingClient(Objects.requireNonNull(client), ML_ORIGIN); @@ -59,7 +63,7 @@ public void cancel() { /** * First writes the size of the model so the native process can - * allocated memory then writes the chunks of binary state. + * allocate memory then writes the chunks of binary state. * * @param modelId The model to write * @param index The index to search for the model @@ -72,11 +76,11 @@ public void writeStateToStream(String modelId, String index, OutputStream restor restorer.setSearchSize(1); restorer.restoreModelDefinition(doc -> writeChunk(doc, restoreStream), success -> { logger.debug("model [{}] state restored in [{}] documents from index [{}]", modelId, restorer.getNumDocsWritten(), index); - if (bytesWritten.get() != modelSize) { + if (modelBytesWritten.get() != modelSize) { logger.error( "model [{}] restored state size [{}] does not equal the expected model size [{}]", modelId, - bytesWritten, + modelBytesWritten, modelSize ); } @@ -96,7 +100,7 @@ private boolean writeChunk(TrainedModelDefinitionDoc doc, OutputStream outputStr // The array backing the BytesReference may be bigger than what is // referred to so write only what is after the offset outputStream.write(doc.getBinaryData().array(), doc.getBinaryData().arrayOffset(), doc.getBinaryData().length()); - bytesWritten.addAndGet(doc.getBinaryData().length()); + modelBytesWritten.addAndGet(doc.getBinaryData().length()); return true; } @@ -139,12 +143,10 @@ private int writeModelSize(String modelId, Long modelSizeBytes, OutputStream out throw new IllegalStateException(message); } - final int NUM_BYTES = 4; - ByteBuffer lengthBuffer = ByteBuffer.allocate(NUM_BYTES); + ByteBuffer lengthBuffer = ByteBuffer.allocate(NUM_BYTES_IN_PRELUDE); lengthBuffer.putInt(modelSizeBytes.intValue()); outputStream.write(lengthBuffer.array()); - bytesWritten.addAndGet(NUM_BYTES); return modelSizeBytes.intValue(); } } From 7a04ec68aed6d02fcf205913607c8b3f0406d67f Mon Sep 17 00:00:00 2001 From: Keith Massey Date: Mon, 29 Nov 2021 08:06:28 -0600 Subject: [PATCH 68/88] Extending the timeout waiting for snapshot to be ready (#81018) This commit extends the timeout in SnapshotLifecycleRestIT::testBasicTimeBasedRetention for waiting for a snapshot to be ready from 10 seconds to 60 seconds to avoid occasional failures. 
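For context, assertBusy retries the assertion until it passes or the window
closes, so widening the window from 10 to 60 seconds only affects runs that
would otherwise fail. A minimal sketch of the retry loop (simplified and with
hypothetical names; the real ESTestCase helper waits with increasing
intervals):

    import java.util.concurrent.TimeUnit;

    public class BusyAssert {

        interface CheckedRunnable {
            void run() throws Exception;
        }

        // Simplified stand-in for ESTestCase#assertBusy: retry until the
        // assertion passes or the deadline expires, then rethrow the last error.
        static void assertBusy(CheckedRunnable assertion, long timeout, TimeUnit unit) throws Exception {
            long deadline = System.nanoTime() + unit.toNanos(timeout);
            AssertionError last = null;
            do {
                try {
                    assertion.run();
                    return; // assertion passed within the window
                } catch (AssertionError e) {
                    last = e;
                    Thread.sleep(100); // simple fixed backoff for the sketch
                }
            } while (System.nanoTime() < deadline);
            throw last != null ? last : new AssertionError("timed out");
        }

        public static void main(String[] args) throws Exception {
            long start = System.currentTimeMillis();
            assertBusy(() -> {
                if (System.currentTimeMillis() - start < 300) {
                    throw new AssertionError("snapshot not ready yet");
                }
            }, 60, TimeUnit.SECONDS);
            System.out.println("snapshot became ready within the window");
        }
    }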
Closes #79549 --- .../org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java index d207c383d652c..b0c05737c5d72 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleRestIT.java @@ -437,7 +437,7 @@ public void testBasicTimeBasedRetention() throws Exception { } catch (ResponseException e) { fail("expected snapshot to exist but it does not: " + EntityUtils.toString(e.getResponse().getEntity())); } - }); + }, 60, TimeUnit.SECONDS); // Run retention every second ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest(); From 1abbf4b387e74559fde2e20d634fccb38f20947e Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 29 Nov 2021 14:33:42 +0000 Subject: [PATCH 69/88] [ML] Add logging for failing PyTorch test (#81044) For #80819 --- .../xpack/ml/integration/PyTorchModelIT.java | 12 +++++++----- .../ml/action/TransportGetDeploymentStatsAction.java | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index 776a94254aeb7..ab05d2a2b0527 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -241,20 +241,22 @@ public void testDeploymentStats() throws IOException { CheckedBiConsumer assertAtLeast = (modelId, state) -> { startDeployment(modelId, state.toString()); Response response = getTrainedModelStats(modelId); - List> stats = (List>) entityAsMap(response).get("trained_model_stats"); + var responseMap = entityAsMap(response); + List> stats = (List>) responseMap.get("trained_model_stats"); assertThat(stats, hasSize(1)); String statusState = (String) XContentMapValues.extractValue("deployment_stats.allocation_status.state", stats.get(0)); - assertThat(stats.toString(), statusState, is(not(nullValue()))); + assertThat(responseMap.toString(), statusState, is(not(nullValue()))); assertThat(AllocationStatus.State.fromString(statusState), greaterThanOrEqualTo(state)); Integer byteSize = (Integer) XContentMapValues.extractValue("deployment_stats.model_size_bytes", stats.get(0)); - assertThat(byteSize, is(not(nullValue()))); + assertThat(responseMap.toString(), byteSize, is(not(nullValue()))); assertThat(byteSize, equalTo((int) RAW_MODEL_SIZE)); Response humanResponse = client().performRequest(new Request("GET", "/_ml/trained_models/" + modelId + "/_stats?human")); - stats = (List>) entityAsMap(humanResponse).get("trained_model_stats"); + var humanResponseMap = entityAsMap(humanResponse); + stats = (List>) humanResponseMap.get("trained_model_stats"); assertThat(stats, hasSize(1)); String stringBytes = (String) XContentMapValues.extractValue("deployment_stats.model_size", stats.get(0)); - assertThat(stringBytes, is(not(nullValue()))); + 
assertThat("stats response: " + responseMap + " human stats response" + humanResponseMap, stringBytes, is(not(nullValue()))); assertThat(stringBytes, equalTo("1.5kb")); stopDeployment(model); }; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 6a995e78ed8b2..5490ded56d7ea 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.ml.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -54,6 +56,8 @@ public class TransportGetDeploymentStatsAction extends TransportTasksAction< GetDeploymentStatsAction.Response, AllocationStats> { + private static final Logger logger = LogManager.getLogger(TransportGetDeploymentStatsAction.class); + @Inject public TransportGetDeploymentStatsAction( TransportService transportService, @@ -129,9 +133,6 @@ protected void doExecute( } } - // check request has been satisfied - ExpandedIdsMatcher requiredIdsMatcher = new ExpandedIdsMatcher(tokenizedRequestIds, true); - requiredIdsMatcher.filterMatchedIds(matchedDeploymentIds); if (matchedDeploymentIds.isEmpty()) { listener.onResponse( new GetDeploymentStatsAction.Response(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), 0L) @@ -154,8 +155,7 @@ protected void doExecute( .collect(Collectors.toList()); // Set the allocation state and reason if we have it for (AllocationStats stats : updatedResponse.getStats().results()) { - Optional modelAllocation = Optional.ofNullable(allocation.getModelAllocation(stats.getModelId())); - TrainedModelAllocation trainedModelAllocation = modelAllocation.orElse(null); + TrainedModelAllocation trainedModelAllocation = allocation.getModelAllocation(stats.getModelId()); if (trainedModelAllocation != null) { stats.setState(trainedModelAllocation.getAllocationState()).setReason(trainedModelAllocation.getReason().orElse(null)); if (trainedModelAllocation.getAllocationState().isAnyOf(AllocationState.STARTED, AllocationState.STARTING)) { @@ -274,6 +274,8 @@ static GetDeploymentStatsAction.Response addFailedRoutes( nodeStats.sort(Comparator.comparing(n -> n.getNode().getId())); + // debug logging added for https://github.com/elastic/elasticsearch/issues/80819 + logger.debug("[{}] deployment stats for non-started deployment", modelId); updatedAllocationStats.add(new AllocationStats(modelId, null, null, null, null, allocation.getStartTime(), nodeStats)); } } From 34ae3dd5b13e01878e07ecd494d0be2c92676a8f Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 29 Nov 2021 14:49:52 +0000 Subject: [PATCH 70/88] [ML] Fix acceptable model snapshot versions in ML deprecation checker (#81060) This is a followup to #81039. The same requirement to tolerate model snapshots back to 6.4.0 that applies to the job opening code also applies to the deprecation checker. Again, we tell the user that 7.0.0 is the model snapshot version we support, but we actually have to support versions going back to 6.4.0 because we didn't update the constant in the C++ in 7.0.0. 
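To make the two constants concrete: the comparison uses the 6.4.0 lower
bound, while any user-facing message quotes the 7.0.0 boundary. A small
sketch, with a hypothetical Version record standing in for
org.elasticsearch.Version:

    public class SnapshotVersionGate {

        // Hypothetical stand-in for org.elasticsearch.Version, illustration only.
        record Version(int major, int minor, int patch) implements Comparable<Version> {
            @Override
            public int compareTo(Version o) {
                int c = Integer.compare(major, o.major);
                if (c != 0) return c;
                c = Integer.compare(minor, o.minor);
                return c != 0 ? c : Integer.compare(patch, o.patch);
            }

            @Override
            public String toString() {
                return major + "." + minor + "." + patch;
            }
        }

        // What the code enforces: the oldest model state Java can still read.
        static final Version MIN_CHECKED = new Version(6, 4, 0);
        // What the user is told, since 7.0.0 is the major-version boundary.
        static final Version MIN_REPORTED = new Version(7, 0, 0);

        public static void main(String[] args) {
            Version snapshotMinVersion = new Version(6, 3, 0);
            if (snapshotMinVersion.compareTo(MIN_CHECKED) < 0) {
                // The message quotes the reported boundary, not the checked one.
                System.out.println("Delete model snapshot [1] or update it to " + MIN_REPORTED + " or greater.");
            }
        }
    }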
Additionally, the wording of the ML deprecation messages is very slightly updated. The messages are different in the 7.16 branch, where they were updated by #79387. This wording is copied forward to master, but with the tiny change that "Snapshot" is changed to "Model snapshot" in one place. This should make it clearer for users that we're talking about ML model snapshots and not cluster snapshots (which are completely different things). Another reason to change the wording is that the UI is looking for the pattern /[Mm]odel snapshot/ to decide when to display the "Fix" button for upgrading ML model snapshots - see elastic/kibana#119745. --- .../xpack/core/ml/MachineLearningField.java | 8 ++++++++ .../xpack/deprecation/MlDeprecationIT.java | 2 +- .../deprecation/MlDeprecationChecker.java | 19 +++++++++++-------- .../ml/action/TransportOpenJobAction.java | 4 ++-- .../task/OpenJobPersistentTasksExecutor.java | 8 ++------ 5 files changed, 24 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java index 3cc7275aafff4..ba24e7eb5a5d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningField.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.core.ml; +import org.elasticsearch.Version; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.hash.MurmurHash3; import org.elasticsearch.common.settings.Setting; @@ -44,6 +45,13 @@ public final class MachineLearningField { License.OperationMode.PLATINUM ); + // Ideally this would be 7.0.0, but it has to be 6.4.0 because due to an oversight it's impossible + // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive. + public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0"); + // We tell the user we support model snapshots newer than 7.0.0 as that's the major version + // boundary, even though behind the scenes we have to support back to 6.4.0. + public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0; + private MachineLearningField() {} public static String valuesToId(String... 
values) { diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index b2162179363b4..3fc880adcf235 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -105,7 +105,7 @@ public void testMlDeprecationChecks() throws Exception { assertThat(response.getMlSettingsIssues(), hasSize(1)); assertThat( response.getMlSettingsIssues().get(0).getMessage(), - containsString("model snapshot [1] for job [deprecation_check_job] needs to be deleted or upgraded") + containsString("Delete model snapshot [1] or update it to 7.0.0 or greater") ); assertThat(response.getMlSettingsIssues().get(0).getMeta(), equalTo(Map.of("job_id", jobId, "snapshot_id", "1"))); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java index f04aa8c582367..36092a820844f 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.deprecation; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; @@ -27,6 +26,9 @@ import java.util.Map; import java.util.Optional; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; + public class MlDeprecationChecker implements DeprecationChecker { static Optional checkDataFeedQuery(DatafeedConfig datafeedConfig, NamedXContentRegistry xContentRegistry) { @@ -67,22 +69,23 @@ static Optional checkDataFeedAggregations(DatafeedConfig dataf } static Optional checkModelSnapshot(ModelSnapshot modelSnapshot) { - if (modelSnapshot.getMinVersion().before(Version.V_7_0_0)) { + if (modelSnapshot.getMinVersion().before(MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION)) { StringBuilder details = new StringBuilder( String.format( Locale.ROOT, - "model snapshot [%s] for job [%s] supports minimum version [%s] and needs to be at least [%s].", + // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ + // and if it doesn't then the expected "Fix" button won't appear for this deprecation. 
+ "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", modelSnapshot.getSnapshotId(), modelSnapshot.getJobId(), - modelSnapshot.getMinVersion(), - Version.V_7_0_0 + modelSnapshot.getMinVersion() ) ); if (modelSnapshot.getLatestRecordTimeStamp() != null) { details.append( String.format( Locale.ROOT, - " The model snapshot's latest record timestamp is [%s]", + " The model snapshot's latest record timestamp is [%s].", XContentElasticsearchExtension.DEFAULT_FORMATTER.format(modelSnapshot.getLatestRecordTimeStamp().toInstant()) ) ); @@ -92,9 +95,9 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot DeprecationIssue.Level.CRITICAL, String.format( Locale.ROOT, - "model snapshot [%s] for job [%s] needs to be deleted or upgraded", + "Delete model snapshot [%s] or update it to %s or greater.", modelSnapshot.getSnapshotId(), - modelSnapshot.getJobId() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION ), "https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html", details.toString(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 3cc30e813ccdd..4a58924651e35 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -54,8 +54,8 @@ import java.util.function.Predicate; import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; -import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.checkAssignmentState; /* diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 6c8ea314dd153..b182ead4ff869 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -67,6 +67,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION; +import static org.elasticsearch.xpack.core.ml.MachineLearningField.MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION; import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.core.ml.MlTasks.PERSISTENT_TASK_MASTER_NODE_TIMEOUT; import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; @@ -74,12 +76,6 @@ public class OpenJobPersistentTasksExecutor extends AbstractJobPersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); - // Ideally this would be 7.0.0, but it has to be 
6.4.0 because due to an oversight it's impossible
-    // for the Java code to distinguish the model states for versions 6.4.0 to 7.9.3 inclusive.
-    public static final Version MIN_CHECKED_SUPPORTED_SNAPSHOT_VERSION = Version.fromString("6.4.0");
-    // We tell the user we support model snapshots newer than 7.0.0 as that's the major version
-    // boundary, even though behind the scenes we have to support back to 6.4.0.
-    public static final Version MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION = Version.V_7_0_0;

     // Resuming a job with a running datafeed from its current snapshot was added in 7.11 and
     // can only be done if the master node is on or after that version.

From 3d0c9efb97041ce116233b7151eab5d1c153ad09 Mon Sep 17 00:00:00 2001
From: Dimitris Athanasiou
Date: Mon, 29 Nov 2021 17:13:28 +0200
Subject: [PATCH 71/88] [ML] Fix datafeed preview with remote indices (#81099)

In #77109 a bug was fixed with regard to `date_nanos` time fields and
the preview datafeed API. However, that fix introduced a new bug. As we
are calling the field caps API to find out whether the time field is
`date_nanos`, we are not setting the datafeed's indices on the request.
This may result in erroneous behaviour on local indices and it will
certainly result in an error if the datafeed's indices are remote.

This commit fixes that problem by setting the datafeed's indices on the
field caps request.
---
 .../TransportPreviewDatafeedAction.java       | 11 +++++---
 .../TransportPreviewDatafeedActionTests.java  | 27 +++++++------------
 2 files changed, 17 insertions(+), 21 deletions(-)

diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
index c7d40b14e51a9..70f6e18d0dc19 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java
@@ -119,7 +119,7 @@ private void previewDatafeed(DatafeedConfig datafeedConfig, Job job, ActionListe
             new DatafeedTimingStatsReporter(new DatafeedTimingStats(datafeedConfig.getJobId()), (ts, refreshPolicy) -> {}),
             listener.delegateFailure((l, dataExtractorFactory) -> {
                 isDateNanos(
-                    previewDatafeedConfig.getHeaders(),
+                    previewDatafeedConfig,
                     job.getDataDescription().getTimeField(),
                     listener.delegateFailure((l2, isDateNanos) -> {
                         DataExtractor dataExtractor = dataExtractorFactory.newExtractor(
@@ -151,13 +151,16 @@ static DatafeedConfig.Builder buildPreviewDatafeed(DatafeedConfig datafeed) {
         return previewDatafeed;
     }

-    private void isDateNanos(Map<String, String> headers, String timeField, ActionListener<Boolean> listener) {
+    private void isDateNanos(DatafeedConfig datafeed, String timeField, ActionListener<Boolean> listener) {
+        FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest();
+        fieldCapabilitiesRequest.indices(datafeed.getIndices().toArray(new String[0])).indicesOptions(datafeed.getIndicesOptions());
+        fieldCapabilitiesRequest.fields(timeField);
         executeWithHeadersAsync(
-            headers,
+            datafeed.getHeaders(),
             ML_ORIGIN,
             client,
             FieldCapabilitiesAction.INSTANCE,
-            new FieldCapabilitiesRequest().fields(timeField),
+            fieldCapabilitiesRequest,
             ActionListener.wrap(fieldCapsResponse -> {
                 Map<String, FieldCapabilities> timeFieldCaps = fieldCapsResponse.getField(timeField);
                 listener.onResponse(timeFieldCaps.keySet().contains(DateFieldMapper.DATE_NANOS_CONTENT_TYPE));
diff --git
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java index a28bdf214b43a..efc48fe5d279d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedActionTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; import java.io.ByteArrayInputStream; @@ -51,21 +50,15 @@ public void setUpTests() { dataExtractor = mock(DataExtractor.class); actionListener = mock(ActionListener.class); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - PreviewDatafeedAction.Response response = (PreviewDatafeedAction.Response) invocationOnMock.getArguments()[0]; - capturedResponse = response.toString(); - return null; - } + doAnswer((Answer) invocationOnMock -> { + PreviewDatafeedAction.Response response = (PreviewDatafeedAction.Response) invocationOnMock.getArguments()[0]; + capturedResponse = response.toString(); + return null; }).when(actionListener).onResponse(any()); - doAnswer(new Answer() { - @Override - public Void answer(InvocationOnMock invocationOnMock) { - capturedFailure = (Exception) invocationOnMock.getArguments()[0]; - return null; - } + doAnswer((Answer) invocationOnMock -> { + capturedFailure = (Exception) invocationOnMock.getArguments()[0]; + return null; }).when(actionListener).onFailure(any()); } @@ -95,7 +88,7 @@ public void testBuildPreviewDatafeed_GivenAggregations() { assertThat(previewDatafeed.getChunkingConfig(), equalTo(datafeed.build().getChunkingConfig())); } - public void testPreviewDatafed_GivenEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenEmptyStream() throws IOException { when(dataExtractor.next()).thenReturn(Optional.empty()); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); @@ -105,7 +98,7 @@ public void testPreviewDatafed_GivenEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenNonEmptyStream() throws IOException { + public void testPreviewDatafeed_GivenNonEmptyStream() throws IOException { String streamAsString = "{\"a\":1, \"b\":2} {\"c\":3, \"d\":4}\n{\"e\":5, \"f\":6}"; InputStream stream = new ByteArrayInputStream(streamAsString.getBytes(StandardCharsets.UTF_8)); when(dataExtractor.next()).thenReturn(Optional.of(stream)); @@ -117,7 +110,7 @@ public void testPreviewDatafed_GivenNonEmptyStream() throws IOException { verify(dataExtractor).cancel(); } - public void testPreviewDatafed_GivenFailure() throws IOException { + public void testPreviewDatafeed_GivenFailure() throws IOException { doThrow(new RuntimeException("failed")).when(dataExtractor).next(); TransportPreviewDatafeedAction.previewDatafeed(dataExtractor, actionListener); From 54e0370b3e29b010a0dc1e0029f9571f1bedff0f Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 29 Nov 2021 15:41:33 +0000 Subject: [PATCH 72/88] Track histogram of transport handling times (#80581) Adds to the transport node stats a record of the distribution of the times for which a transport thread was handling a message, represented as a histogram. 
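Recording a handling time reduces to finding the position of its highest set
bit, since the bucket bounds are fixed powers of two. A simplified sketch of
the bucketing (plain long counters here; the HandlingTimeTracker added below
uses LongAdder so transport threads can increment without contention):

    // Simplified sketch of the power-of-two bucketing, illustration only.
    // Bucket i covers [2^(i-1), 2^i) milliseconds for 1 <= i <= 16, with
    // bucket 0 for times below 1ms and bucket 17 for 65536ms and above.
    public class HistogramSketch {

        static final int BUCKET_COUNT = 18; // 17 upper bounds (1, 2, ..., 65536) plus overflow

        static int bucket(long millis) {
            if (millis <= 0) {
                return 0;
            }
            if (millis >= 65536) {
                return BUCKET_COUNT - 1;
            }
            // 64 minus the number of leading zero bits = index of highest set bit + 1
            return Long.SIZE - Long.numberOfLeadingZeros(millis);
        }

        public static void main(String[] args) {
            long[] histogram = new long[BUCKET_COUNT];
            for (long t : new long[] { 0, 1, 3, 127, 128, 65535, 65536, Long.MAX_VALUE }) {
                histogram[bucket(t)]++;
            }
            for (int i = 0; i < BUCKET_COUNT; i++) {
                System.out.println("bucket " + i + ": " + histogram[i]);
            }
        }
    }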
Closes #80428 --- docs/reference/cluster/nodes-stats.asciidoc | 48 ++++++++++ .../test/nodes.stats/60_transport_stats.yml | 45 +++++++++ .../common/network/HandlingTimeTracker.java | 65 +++++++++++++ .../common/network/NetworkService.java | 5 + .../http/AbstractHttpServerTransport.java | 5 +- .../transport/InboundHandler.java | 11 ++- .../transport/OutboundHandler.java | 17 +++- .../elasticsearch/transport/TcpTransport.java | 11 ++- .../transport/TransportStats.java | 91 ++++++++++++++++++- .../cluster/node/stats/NodeStatsTests.java | 14 ++- .../network/HandlingTimeTrackerTests.java | 83 +++++++++++++++++ .../transport/InboundHandlerTests.java | 7 +- .../transport/OutboundHandlerTests.java | 3 +- .../transport/TcpTransportTests.java | 4 +- .../transport/TestTransportChannels.java | 3 +- 15 files changed, 393 insertions(+), 19 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java create mode 100644 server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 253890cd2a175..a909335fd30ee 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -1899,6 +1899,54 @@ Size of TX packets sent by the node during internal cluster communication. (integer) Size, in bytes, of TX packets sent by the node during internal cluster communication. + +`inbound_handling_time_histogram`:: +(array) +The distribution of the time spent handling each inbound message on a transport +thread, represented as a histogram. ++ +.Properties of `inbound_handling_time_histogram` +[%collapsible] +======= +`ge_millis`:: +(integer) +The inclusive lower bound of the bucket in milliseconds. Omitted on the first +bucket since this bucket has no lower bound. + +`lt_millis`:: +(integer) +The exclusive upper bound of the bucket in milliseconds. Omitted on the last +bucket since this bucket has no upper bound. + +`count`:: +(integer) +The number of times a transport thread took a period of time within the bounds +of this bucket to handle an inbound message. +======= + +`outbound_handling_time_histogram`:: +(array) +The distribution of the time spent sending each outbound transport message on a +transport thread, represented as a histogram. ++ +.Properties of `outbound_handling_time_histogram` +[%collapsible] +======= +`ge_millis`:: +(integer) +The inclusive lower bound of the bucket in milliseconds. Omitted on the first +bucket since this bucket has no lower bound. + +`lt_millis`:: +(integer) +The exclusive upper bound of the bucket in milliseconds. Omitted on the last +bucket since this bucket has no upper bound. + +`count`:: +(integer) +The number of times a transport thread took a period of time within the bounds +of this bucket to send a transport message. 
+======= ====== [[cluster-nodes-stats-api-response-body-http]] diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml index 4f4b97bbcd521..3c3b4e6dacdf5 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/nodes.stats/60_transport_stats.yml @@ -20,3 +20,48 @@ - gte: { nodes.$node_id.transport.tx_count: 0 } - gte: { nodes.$node_id.transport.rx_size_in_bytes: 0 } - gte: { nodes.$node_id.transport.tx_size_in_bytes: 0 } + +--- +"Transport handling time histogram": + - skip: + version: " - 8.0.99" + reason: "handling_time_histograms were added in 8.1" + features: [arbitrary_key] + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + nodes.stats: + metric: [ transport ] + + - length: { nodes.$node_id.transport.inbound_handling_time_histogram: 18 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.inbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.inbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.inbound_handling_time_histogram.17.lt_millis + + + - length: { nodes.$node_id.transport.outbound_handling_time_histogram: 18 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.0.count: 0 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.0.ge_millis + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.0.lt_millis: 1 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.1.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.ge_millis: 1 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.1.lt_millis: 2 } + + - gte: { nodes.$node_id.transport.outbound_handling_time_histogram.17.count: 0 } + - match: { nodes.$node_id.transport.outbound_handling_time_histogram.17.ge_millis: 65536 } + - is_false: nodes.$node_id.transport.outbound_handling_time_histogram.17.lt_millis diff --git a/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java b/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java new file mode 100644 index 0000000000000..a2787cb2d5332 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/network/HandlingTimeTracker.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.common.network; + +import java.util.concurrent.atomic.LongAdder; + +/** + * Tracks how long message handling takes on a transport thread as a histogram with fixed buckets. + */ +public class HandlingTimeTracker { + + public static int[] getBucketUpperBounds() { + int[] bounds = new int[17]; + for (int i = 0; i < bounds.length; i++) { + bounds[i] = 1 << i; + } + return bounds; + } + + private static int getBucket(long handlingTimeMillis) { + if (handlingTimeMillis <= 0) { + return 0; + } else if (LAST_BUCKET_LOWER_BOUND <= handlingTimeMillis) { + return BUCKET_COUNT - 1; + } else { + return Long.SIZE - Long.numberOfLeadingZeros(handlingTimeMillis); + } + } + + public static final int BUCKET_COUNT = getBucketUpperBounds().length + 1; + + private static final long LAST_BUCKET_LOWER_BOUND = getBucketUpperBounds()[BUCKET_COUNT - 2]; + + private final LongAdder[] buckets; + + public HandlingTimeTracker() { + buckets = new LongAdder[BUCKET_COUNT]; + for (int i = 0; i < BUCKET_COUNT; i++) { + buckets[i] = new LongAdder(); + } + } + + public void addHandlingTime(long handlingTimeMillis) { + buckets[getBucket(handlingTimeMillis)].increment(); + } + + /** + * @return An array of frequencies of handling times in buckets with upper bounds as returned by {@link #getBucketUpperBounds()}, plus + * an extra bucket for handling times longer than the longest upper bound. + */ + public long[] getHistogram() { + final long[] histogram = new long[BUCKET_COUNT]; + for (int i = 0; i < BUCKET_COUNT; i++) { + histogram[i] = buckets[i].longValue(); + } + return histogram; + } + +} diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java index f816d9446ae9b..25c6aeea4e2db 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkService.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkService.java @@ -90,11 +90,16 @@ public interface CustomNameResolver { } private final List customNameResolvers; + private final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); public NetworkService(List customNameResolvers) { this.customNameResolvers = Objects.requireNonNull(customNameResolvers, "customNameResolvers must be non null"); } + public HandlingTimeTracker getHandlingTimeTracker() { + return handlingTimeTracker; + } + /** * Resolves {@code bindHosts} to a list of internet addresses. The list will * not contain duplicate addresses. 
diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index 5dcffec12bc90..125feb2c9fc77 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -355,11 +355,12 @@ protected void serverAcceptedChannel(HttpChannel httpChannel) { */ public void incomingRequest(final HttpRequest httpRequest, final HttpChannel httpChannel) { httpClientStatsTracker.updateClientStats(httpRequest, httpChannel); - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); try { handleIncomingRequest(httpRequest, httpChannel, httpRequest.getInboundException()); } finally { - final long took = threadPool.relativeTimeInMillis() - startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + networkService.getHandlingTimeTracker().addHandlingTime(took); final long logThreshold = slowLogThresholdMs; if (logThreshold > 0 && took > logThreshold) { logger.warn( diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java index 78ec5eb377326..ad2e3a9e38a3c 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; @@ -40,6 +41,7 @@ public class InboundHandler { private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; private final Transport.ResponseHandlers responseHandlers; + private final HandlingTimeTracker handlingTimeTracker; private final Transport.RequestHandlers requestHandlers; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; @@ -53,7 +55,8 @@ public class InboundHandler { TransportHandshaker handshaker, TransportKeepAlive keepAlive, Transport.RequestHandlers requestHandlers, - Transport.ResponseHandlers responseHandlers + Transport.ResponseHandlers responseHandlers, + HandlingTimeTracker handlingTimeTracker ) { this.threadPool = threadPool; this.outboundHandler = outboundHandler; @@ -62,6 +65,7 @@ public class InboundHandler { this.keepAlive = keepAlive; this.requestHandlers = requestHandlers; this.responseHandlers = responseHandlers; + this.handlingTimeTracker = handlingTimeTracker; } void setMessageListener(TransportMessageListener listener) { @@ -77,7 +81,7 @@ void setSlowLogThreshold(TimeValue slowLogThreshold) { } void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); TransportLogger.logInboundMessage(channel, message); @@ -155,7 +159,8 @@ private void messageReceived(TcpChannel channel, InboundMessage message, long st } } } finally { - final long took = threadPool.relativeTimeInMillis() - 
startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + handlingTimeTracker.addHandlingTime(took); final long logThreshold = slowLogThresholdMs; if (logThreshold > 0 && took > logThreshold) { if (isRequest) { diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java index de46d631ac73b..18aeb12b81645 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundHandler.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.transport.NetworkExceptionHelper; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -37,17 +38,26 @@ final class OutboundHandler { private final StatsTracker statsTracker; private final ThreadPool threadPool; private final Recycler recycler; + private final HandlingTimeTracker handlingTimeTracker; private volatile long slowLogThresholdMs = Long.MAX_VALUE; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; - OutboundHandler(String nodeName, Version version, StatsTracker statsTracker, ThreadPool threadPool, Recycler recycler) { + OutboundHandler( + String nodeName, + Version version, + StatsTracker statsTracker, + ThreadPool threadPool, + Recycler recycler, + HandlingTimeTracker handlingTimeTracker + ) { this.nodeName = nodeName; this.version = version; this.statsTracker = statsTracker; this.threadPool = threadPool; this.recycler = recycler; + this.handlingTimeTracker = handlingTimeTracker; } void setSlowLogThreshold(TimeValue slowLogThreshold) { @@ -168,7 +178,7 @@ private void internalSend( @Nullable OutboundMessage message, ActionListener listener ) { - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.rawRelativeTimeInMillis(); channel.getChannelStats().markAccessed(startTime); final long messageSize = reference.length(); TransportLogger.logOutboundMessage(channel, reference); @@ -196,7 +206,8 @@ public void onFailure(Exception e) { private void maybeLogSlowMessage(boolean success) { final long logThreshold = slowLogThresholdMs; if (logThreshold > 0) { - final long took = threadPool.relativeTimeInMillis() - startTime; + final long took = threadPool.rawRelativeTimeInMillis() - startTime; + handlingTimeTracker.addHandlingTime(took); if (took > logThreshold) { logger.warn( "sending transport message [{}] of size [{}] on [{}] took [{}ms] which is above the warn " diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 50361bdda2b7b..6462701265383 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.network.CloseableChannel; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import 
org.elasticsearch.common.network.NetworkUtils; @@ -116,6 +117,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements private final TransportHandshaker handshaker; private final TransportKeepAlive keepAlive; + private final HandlingTimeTracker outboundHandlingTimeTracker = new HandlingTimeTracker(); private final OutboundHandler outboundHandler; private final InboundHandler inboundHandler; private final ResponseHandlers responseHandlers = new ResponseHandlers(); @@ -141,7 +143,7 @@ public TcpTransport( String nodeName = Node.NODE_NAME_SETTING.get(settings); this.recycler = createRecycler(settings, pageCacheRecycler); - this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, recycler); + this.outboundHandler = new OutboundHandler(nodeName, version, statsTracker, threadPool, recycler, outboundHandlingTimeTracker); this.handshaker = new TransportHandshaker( version, threadPool, @@ -165,7 +167,8 @@ public TcpTransport( handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + networkService.getHandlingTimeTracker() ); } @@ -918,7 +921,9 @@ public final TransportStats getStats() { messagesReceived, bytesRead, messagesSent, - bytesWritten + bytesWritten, + networkService.getHandlingTimeTracker().getHistogram(), + outboundHandlingTimeTracker.getHistogram() ); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index 7caf3c241615c..d578c8437da97 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -8,14 +8,17 @@ package org.elasticsearch.transport; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Arrays; public class TransportStats implements Writeable, ToXContentFragment { @@ -25,14 +28,28 @@ public class TransportStats implements Writeable, ToXContentFragment { private final long rxSize; private final long txCount; private final long txSize; - - public TransportStats(long serverOpen, long totalOutboundConnections, long rxCount, long rxSize, long txCount, long txSize) { + private final long[] inboundHandlingTimeBucketFrequencies; + private final long[] outboundHandlingTimeBucketFrequencies; + + public TransportStats( + long serverOpen, + long totalOutboundConnections, + long rxCount, + long rxSize, + long txCount, + long txSize, + long[] inboundHandlingTimeBucketFrequencies, + long[] outboundHandlingTimeBucketFrequencies + ) { this.serverOpen = serverOpen; this.totalOutboundConnections = totalOutboundConnections; this.rxCount = rxCount; this.rxSize = rxSize; this.txCount = txCount; this.txSize = txSize; + this.inboundHandlingTimeBucketFrequencies = inboundHandlingTimeBucketFrequencies; + this.outboundHandlingTimeBucketFrequencies = outboundHandlingTimeBucketFrequencies; + assert assertHistogramsConsistent(); } public TransportStats(StreamInput in) throws IOException { @@ -42,6 +59,20 @@ public TransportStats(StreamInput in) throws IOException { rxSize = in.readVLong(); txCount = in.readVLong(); 
txSize = in.readVLong(); + if (in.getVersion().onOrAfter(Version.V_8_1_0) && in.readBoolean()) { + inboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; + for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { + inboundHandlingTimeBucketFrequencies[i] = in.readVLong(); + } + outboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; + for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { + outboundHandlingTimeBucketFrequencies[i] = in.readVLong(); + } + } else { + inboundHandlingTimeBucketFrequencies = new long[0]; + outboundHandlingTimeBucketFrequencies = new long[0]; + } + assert assertHistogramsConsistent(); } @Override @@ -52,6 +83,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(rxSize); out.writeVLong(txCount); out.writeVLong(txSize); + if (out.getVersion().onOrAfter(Version.V_8_1_0)) { + assert (inboundHandlingTimeBucketFrequencies.length > 0) == (outboundHandlingTimeBucketFrequencies.length > 0); + out.writeBoolean(inboundHandlingTimeBucketFrequencies.length > 0); + for (long handlingTimeBucketFrequency : inboundHandlingTimeBucketFrequencies) { + out.writeVLong(handlingTimeBucketFrequency); + } + for (long handlingTimeBucketFrequency : outboundHandlingTimeBucketFrequencies) { + out.writeVLong(handlingTimeBucketFrequency); + } + } } public long serverOpen() { @@ -94,6 +135,25 @@ public ByteSizeValue getTxSize() { return txSize(); } + public long[] getInboundHandlingTimeBucketFrequencies() { + return Arrays.copyOf(inboundHandlingTimeBucketFrequencies, inboundHandlingTimeBucketFrequencies.length); + } + + public long[] getOutboundHandlingTimeBucketFrequencies() { + return Arrays.copyOf(outboundHandlingTimeBucketFrequencies, outboundHandlingTimeBucketFrequencies.length); + } + + private boolean assertHistogramsConsistent() { + assert inboundHandlingTimeBucketFrequencies.length == outboundHandlingTimeBucketFrequencies.length; + if (inboundHandlingTimeBucketFrequencies.length == 0) { + // Stats came from before v8.1 + assert Version.CURRENT.major == Version.V_8_0_0.major; + } else { + assert inboundHandlingTimeBucketFrequencies.length == HandlingTimeTracker.BUCKET_COUNT; + } + return true; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.TRANSPORT); @@ -103,10 +163,35 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.RX_SIZE_IN_BYTES, Fields.RX_SIZE, new ByteSizeValue(rxSize)); builder.field(Fields.TX_COUNT, txCount); builder.humanReadableField(Fields.TX_SIZE_IN_BYTES, Fields.TX_SIZE, new ByteSizeValue(txSize)); + if (inboundHandlingTimeBucketFrequencies.length > 0) { + histogramToXContent(builder, inboundHandlingTimeBucketFrequencies, Fields.INBOUND_HANDLING_TIME_HISTOGRAM); + histogramToXContent(builder, outboundHandlingTimeBucketFrequencies, Fields.OUTBOUND_HANDLING_TIME_HISTOGRAM); + } else { + // Stats came from before v8.1 + assert Version.CURRENT.major == Version.V_8_0_0.major; + } builder.endObject(); return builder; } + private void histogramToXContent(XContentBuilder builder, long[] bucketFrequencies, String fieldName) throws IOException { + final int[] bucketBounds = HandlingTimeTracker.getBucketUpperBounds(); + assert bucketFrequencies.length == bucketBounds.length + 1; + builder.startArray(fieldName); + for (int i = 0; i < bucketFrequencies.length; i++) { + builder.startObject(); + if (i > 0 && i <= 
bucketBounds.length) { + builder.field("ge_millis", bucketBounds[i - 1]); + } + if (i < bucketBounds.length) { + builder.field("lt_millis", bucketBounds[i]); + } + builder.field("count", bucketFrequencies[i]); + builder.endObject(); + } + builder.endArray(); + } + static final class Fields { static final String TRANSPORT = "transport"; static final String SERVER_OPEN = "server_open"; @@ -117,5 +202,7 @@ static final class Fields { static final String TX_COUNT = "tx_count"; static final String TX_SIZE = "tx_size"; static final String TX_SIZE_IN_BYTES = "tx_size_in_bytes"; + static final String INBOUND_HANDLING_TIME_HISTOGRAM = "inbound_handling_time_histogram"; + static final String OUTBOUND_HANDLING_TIME_HISTOGRAM = "outbound_handling_time_histogram"; } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java index 144c9f0843441..2467aded6292f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterStateUpdateStats; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.http.HttpStats; @@ -47,6 +48,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import java.util.stream.IntStream; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -238,6 +240,14 @@ public void testSerialization() throws IOException { assertEquals(nodeStats.getTransport().getServerOpen(), deserializedNodeStats.getTransport().getServerOpen()); assertEquals(nodeStats.getTransport().getTxCount(), deserializedNodeStats.getTransport().getTxCount()); assertEquals(nodeStats.getTransport().getTxSize(), deserializedNodeStats.getTransport().getTxSize()); + assertArrayEquals( + nodeStats.getTransport().getInboundHandlingTimeBucketFrequencies(), + deserializedNodeStats.getTransport().getInboundHandlingTimeBucketFrequencies() + ); + assertArrayEquals( + nodeStats.getTransport().getOutboundHandlingTimeBucketFrequencies(), + deserializedNodeStats.getTransport().getOutboundHandlingTimeBucketFrequencies() + ); } if (nodeStats.getHttp() == null) { assertNull(deserializedNodeStats.getHttp()); @@ -672,7 +682,9 @@ public static NodeStats createNodeStats() { randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), - randomNonNegativeLong() + randomNonNegativeLong(), + IntStream.range(0, HandlingTimeTracker.BUCKET_COUNT).mapToLong(i -> randomNonNegativeLong()).toArray(), + IntStream.range(0, HandlingTimeTracker.BUCKET_COUNT).mapToLong(i -> randomNonNegativeLong()).toArray() ) : null; HttpStats httpStats = null; diff --git a/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java b/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java new file mode 100644 index 0000000000000..b999cf8ff4875 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/network/HandlingTimeTrackerTests.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.greaterThan; + +public class HandlingTimeTrackerTests extends ESTestCase { + + public void testHistogram() { + final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); + + assertArrayEquals(new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(0L); + assertArrayEquals(new long[] { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(1L); + assertArrayEquals(new long[] { 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(2L); + assertArrayEquals(new long[] { 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(3L); + assertArrayEquals(new long[] { 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(4L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(127L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(128L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(65535L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(65536L); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(Long.MAX_VALUE); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(randomLongBetween(65536L, Long.MAX_VALUE)); + assertArrayEquals(new long[] { 1, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3 }, handlingTimeTracker.getHistogram()); + + handlingTimeTracker.addHandlingTime(randomLongBetween(Long.MIN_VALUE, 0L)); + assertArrayEquals(new long[] { 2, 1, 2, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3 }, handlingTimeTracker.getHistogram()); + } + + public void testHistogramRandom() { + final int[] upperBounds = HandlingTimeTracker.getBucketUpperBounds(); + final long[] expectedCounts = new long[upperBounds.length + 1]; + final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); + for (int i = between(0, 1000); i > 0; i--) { + final int bucket = between(0, expectedCounts.length - 1); + expectedCounts[bucket] += 1; + + final int lowerBound = bucket == 0 ? 0 : upperBounds[bucket - 1]; + final int upperBound = bucket == upperBounds.length ? randomBoolean() ? 
100000 : Integer.MAX_VALUE : upperBounds[bucket] - 1; + handlingTimeTracker.addHandlingTime(between(lowerBound, upperBound)); + } + + assertArrayEquals(expectedCounts, handlingTimeTracker.getHistogram()); + } + + public void testBoundsConsistency() { + final int[] upperBounds = HandlingTimeTracker.getBucketUpperBounds(); + assertThat(upperBounds[0], greaterThan(0)); + for (int i = 1; i < upperBounds.length; i++) { + assertThat(upperBounds[i], greaterThan(upperBounds[i - 1])); + } + } + +} diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index a482a6bd713eb..fda0090125ab8 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.core.TimeValue; @@ -69,7 +70,8 @@ public void setUp() throws Exception { version, new StatsTracker(), threadPool, - new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE) + new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE), + new HandlingTimeTracker() ); requestHandlers = new Transport.RequestHandlers(); responseHandlers = new Transport.ResponseHandlers(); @@ -80,7 +82,8 @@ public void setUp() throws Exception { handshaker, keepAlive, requestHandlers, - responseHandlers + responseHandlers, + new HandlingTimeTracker() ); } diff --git a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java index 7808831447e6b..4a85ab868d890 100644 --- a/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/OutboundHandlerTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.PageCacheRecycler; @@ -71,7 +72,7 @@ public void setUp() throws Exception { node = new DiscoveryNode("", transportAddress, Version.CURRENT); StatsTracker statsTracker = new StatsTracker(); compressionScheme = randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4); - handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, recycler); + handler = new OutboundHandler("node", Version.CURRENT, statsTracker, threadPool, recycler, new HandlingTimeTracker()); final LongSupplier millisSupplier = () -> TimeValue.nsecToMSec(System.nanoTime()); final InboundDecoder decoder = new InboundDecoder(Version.CURRENT, this.recycler); diff --git a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java index 47252ceb8a124..814afba514217 100644 --- a/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java +++ 
b/server/src/test/java/org/elasticsearch/transport/TcpTransportTests.java
@@ -17,6 +17,7 @@
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.HandlingTimeTracker;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.Settings;
@@ -541,7 +542,8 @@ private void testExceptionHandling(
                 Version.CURRENT,
                 new StatsTracker(),
                 testThreadPool,
-                new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY))
+                new BytesRefRecycler(new MockPageCacheRecycler(Settings.EMPTY)),
+                new HandlingTimeTracker()
             )
         );

diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
index 85a45cd7a691c..15b24b0a77e6f 100644
--- a/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
+++ b/test/framework/src/main/java/org/elasticsearch/transport/TestTransportChannels.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.transport;

 import org.elasticsearch.Version;
+import org.elasticsearch.common.network.HandlingTimeTracker;
 import org.elasticsearch.common.util.PageCacheRecycler;
 import org.elasticsearch.threadpool.ThreadPool;

@@ -24,7 +25,7 @@ public static TcpTransportChannel newFakeTcpTransportChannel(
     ) {
         BytesRefRecycler recycler = new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE);
         return new TcpTransportChannel(
-            new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, recycler),
+            new OutboundHandler(nodeName, version, new StatsTracker(), threadPool, recycler, new HandlingTimeTracker()),
             channel,
             action,
             requestId,

From 498f581bfc747e8442ea2f3dfc20513ad6b0c95d Mon Sep 17 00:00:00 2001
From: Lisa Cawley
Date: Mon, 29 Nov 2021 08:13:22 -0800
Subject: [PATCH 73/88] [ML] Updates visibility of validate API (#81061)

---
 .../src/main/resources/rest-api-spec/api/ml.validate.json       | 2 +-
 .../main/resources/rest-api-spec/api/ml.validate_detector.json  | 2 +-
 .../resources/rest-api-spec/api/xpack-ml.validate.json          | 2 +-
 .../resources/rest-api-spec/api/xpack-ml.validate_detector.json | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json
index 5db5f91ddc527..b57f1bb69ffa1 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate.json
@@ -5,7 +5,7 @@
       "description":"Validates an anomaly detection job."
     },
     "stability":"stable",
-    "visibility":"public",
+    "visibility":"private",
     "headers":{
       "accept": [ "application/json"],
       "content_type": ["application/json"]
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json
index 30a24b1c6074a..1400da1ccee09 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/ml.validate_detector.json
@@ -5,7 +5,7 @@
       "description":"Validates an anomaly detection detector."
}, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/json"], "content_type": ["application/json"] diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json index 7c9bbf70f4469..ad337c3c1ad82 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json +++ b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection job." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], "content_type": ["application/json"] diff --git a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json index fe5fdd7a7b7a1..5a06df8977dfc 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json +++ b/x-pack/qa/xpack-prefix-rest-compat/src/yamlRestTestV7Compat/resources/rest-api-spec/api/xpack-ml.validate_detector.json @@ -5,7 +5,7 @@ "description":"Validates an anomaly detection detector." }, "stability":"stable", - "visibility":"public", + "visibility":"private", "headers":{ "accept": [ "application/vnd.elasticsearch+json;compatible-with=7"], "content_type": ["application/json"] From 4d19702221d57522669f7b6a34fc291c0b184037 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 29 Nov 2021 11:52:02 -0500 Subject: [PATCH 74/88] [DOCS] Update xrefs for snapshot restore docs (#81023) Changes: * Removes a leading slash from the restore snapshot API's prerequisites. * Updates several xrefs that point to redirected pages. --- docs/reference/settings/snapshot-settings.asciidoc | 4 ++-- .../snapshot-restore/apis/restore-snapshot-api.asciidoc | 2 +- docs/reference/snapshot-restore/index.asciidoc | 6 +++--- docs/reference/snapshot-restore/restore-snapshot.asciidoc | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/reference/settings/snapshot-settings.asciidoc b/docs/reference/settings/snapshot-settings.asciidoc index 5af62143ffdd6..1f1acc3653d0d 100644 --- a/docs/reference/settings/snapshot-settings.asciidoc +++ b/docs/reference/settings/snapshot-settings.asciidoc @@ -15,7 +15,7 @@ limit. ==== {slm-init} settings -The following cluster settings configure <>. [[slm-history-index-enabled]] @@ -27,7 +27,7 @@ to the `slm-history-*` indices. Defaults to `true`. [[slm-retention-schedule]] `slm.retention_schedule`:: (<>, <>) -Controls when the <> runs. +Controls when the <> runs. Can be a periodic or absolute time schedule. Supports all values supported by the <>. Defaults to daily at 1:30am UTC: `0 30 1 * * ?`. 
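
For illustration only, not part of this patch: `slm.retention_schedule` is a
dynamic cluster setting, so besides elasticsearch.yml it can be changed at
runtime. A minimal Java sketch, assuming a connected `Client` named `client`
and `org.elasticsearch.common.settings.Settings`:

    // Run SLM retention daily at 01:30 UTC, i.e. the documented default cron.
    client.admin()
        .cluster()
        .prepareUpdateSettings()
        .setPersistentSettings(
            Settings.builder().put("slm.retention_schedule", "0 30 1 * * ?")
        )
        .get();
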
diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc
index 3ae765a83cb39..23d7168a5e384 100644
--- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc
+++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc
@@ -92,7 +92,7 @@ the <>:
 +
 [source,console]
 ----
-GET /_index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
+GET _index_template/*?filter_path=index_templates.name,index_templates.index_template.index_patterns,index_templates.index_template.data_stream
 ----
 +
 If no such template exists, you can <> or
diff --git a/docs/reference/snapshot-restore/index.asciidoc b/docs/reference/snapshot-restore/index.asciidoc
index 52bc5b23d66f4..66e93cb213903 100644
--- a/docs/reference/snapshot-restore/index.asciidoc
+++ b/docs/reference/snapshot-restore/index.asciidoc
@@ -23,9 +23,9 @@ repository. Before you can take or restore snapshots, you must
 * Microsoft Azure

 After you register a snapshot repository, you can use
-<> to automatically take and
-manage snapshots. You can then <>
-to recover or transfer its data.
+<> to automatically take and manage
+snapshots. You can then <> to
+recover or transfer its data.

 [discrete]
 [[snapshot-contents]]
diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc
index 7f320de5ebca6..0a12f5966617d 100644
--- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc
+++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc
@@ -303,7 +303,7 @@ specific indices or data streams instead.

 If you're restoring to a different cluster, see <> before you start.

-. If you <>, you can restore them to each node. This step is optional and requires a
<>.
+

From 5c8e7c686e96bef5193a4b8d7f756e93a3b30a7e Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Mon, 29 Nov 2021 17:06:44 +0000
Subject: [PATCH 75/88] [ML] Switch message and detail for model snapshot deprecations (#81108)

This is a fix to the fix in #81060. In the original fix, where I tried to
port the changes from #79387 to master, I didn't notice that the text of
the message and the detail of the deprecation had been largely switched
around, and my wording tweaks in #81060 did not make that switch either.
This PR switches the two strings we generate.

This only affects 8.0 and 8.1. In 7.16 the discrepancy became obvious in
the backport of #81060 to that branch, so it is already correct there.
--- .../xpack/deprecation/MlDeprecationIT.java | 2 +- .../xpack/deprecation/MlDeprecationChecker.java | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java index 3fc880adcf235..dad4d25afe62b 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java +++ b/x-pack/plugin/deprecation/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/deprecation/MlDeprecationIT.java @@ -105,7 +105,7 @@ public void testMlDeprecationChecks() throws Exception { assertThat(response.getMlSettingsIssues(), hasSize(1)); assertThat( response.getMlSettingsIssues().get(0).getMessage(), - containsString("Delete model snapshot [1] or update it to 7.0.0 or greater") + containsString("Model snapshot [1] for job [deprecation_check_job] has an obsolete minimum version") ); assertThat(response.getMlSettingsIssues().get(0).getMeta(), equalTo(Map.of("job_id", jobId, "snapshot_id", "1"))); } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java index 36092a820844f..2988af7a2dab6 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/MlDeprecationChecker.java @@ -73,12 +73,9 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot StringBuilder details = new StringBuilder( String.format( Locale.ROOT, - // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ - // and if it doesn't then the expected "Fix" button won't appear for this deprecation. - "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", + "Delete model snapshot [%s] or update it to %s or greater.", modelSnapshot.getSnapshotId(), - modelSnapshot.getJobId(), - modelSnapshot.getMinVersion() + MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION ) ); if (modelSnapshot.getLatestRecordTimeStamp() != null) { @@ -95,9 +92,12 @@ static Optional checkModelSnapshot(ModelSnapshot modelSnapshot DeprecationIssue.Level.CRITICAL, String.format( Locale.ROOT, - "Delete model snapshot [%s] or update it to %s or greater.", + // Important: the Kibana upgrade assistant expects this to match the pattern /[Mm]odel snapshot/ + // and if it doesn't then the expected "Fix" button won't appear for this deprecation. + "Model snapshot [%s] for job [%s] has an obsolete minimum version [%s].", modelSnapshot.getSnapshotId(), - MIN_REPORTED_SUPPORTED_SNAPSHOT_VERSION + modelSnapshot.getJobId(), + modelSnapshot.getMinVersion() ), "https://www.elastic.co/guide/en/elasticsearch/reference/master/ml-upgrade-job-model-snapshot.html", details.toString(), From 1adb59c041c000f8005d8af2eb6f06f87fc82f2a Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 29 Nov 2021 09:41:03 -0800 Subject: [PATCH 76/88] Split off the values supplier for ScriptDocValues (#80635) This change makes all ScriptDocValues purely a wrapper around a supplier. (Similar to what FieldValues was.) However, there are some important differences: * This is meant to be transitory. 
As more DocValuesFields are completed, more of the simple suppliers
(ones that aren't DocValuesFields) can be removed.
* ScriptDocValues is the wrapper rather than the supplier. DocValuesFields
will eventually be the target suppliers, which makes it easy to remove the
simple suppliers once they are no longer necessary.
* ScriptDocValues can be easily deprecated and removed without having to
move their code to DocValuesFields. Once ScriptDocValues is removed, we can
remove the supplier code from DocValuesFields.
* DelegateDocValuesField uses an assert statement to ensure that its
ScriptDocValues are not supplied by another DocValuesField. This helps us
identify bugs during testing.
* ScriptDocValues no longer has setNextDocId. This helps us identify bugs
during compilation.
* Conversions will not share/wrap suppliers since the suppliers are
transitory.

(A minimal sketch of the resulting access pattern follows the diffstat
below.)
---
 .../mapper/extras/ScaledFloatFieldMapper.java |   3 +-
 .../mapper/murmur3/Murmur3FieldMapper.java    |   6 +-
 .../index/fielddata/IpScriptFieldData.java    |  15 +-
 .../index/fielddata/ScriptDocValues.java      | 289 ++++++++++++------
 .../fielddata/StringScriptFieldData.java      |   3 +-
 .../plain/AbstractLeafGeoPointFieldData.java  |   3 +-
 .../plain/AbstractLeafOrdinalsFieldData.java  |   2 +-
 .../plain/BinaryDVLeafFieldData.java          |   2 +-
 .../plain/StringBinaryDVLeafFieldData.java    |   2 +-
 .../index/mapper/DateFieldMapper.java         |   9 +-
 .../index/mapper/DoubleScriptFieldType.java   |   3 +-
 .../index/mapper/IdFieldMapper.java           |   2 +-
 .../index/mapper/IpFieldMapper.java           |  72 +++--
 .../index/mapper/LongScriptFieldType.java     |   3 +-
 .../index/mapper/NumberFieldMapper.java       |  16 +-
 .../index/mapper/SeqNoFieldMapper.java        |   3 +-
 .../index/mapper/VersionFieldMapper.java      |   3 +-
 .../script/ScoreScriptUtils.java              |   2 +-
 .../script/field/BinaryDocValuesField.java    |   5 +-
 .../script/field/BooleanDocValuesField.java   |  64 ++--
 .../script/field/DelegateDocValuesField.java  |   5 +-
 .../ScriptDocValuesGeoPointsTests.java        |  19 +-
 .../fielddata/ScriptDocValuesLongsTests.java  |   7 +-
 .../plain/HalfFloatFielddataTests.java        |   5 +-
 .../index/mapper/DateFieldTypeTests.java      |   3 +-
 .../index/mapper/IpScriptFieldTypeTests.java  |   4 +-
 .../query/SearchExecutionContextTests.java    |  16 +-
 .../sampler/DiversifiedSamplerTests.java      |   3 +-
 .../search/lookup/LeafDocLookupTests.java     |  12 +-
 .../AggregateDoubleMetricFieldMapper.java     |   6 +-
 .../UnsignedLongDocValuesField.java           |  35 ++-
 .../UnsignedLongScriptDocValues.java          |  23 +-
 .../org.elasticsearch.xpack.unsignedlong.txt  |   7 +-
 .../versionfield/VersionScriptDocValues.java  |  57 ++--
 .../VersionStringFieldMapper.java             |   7 +-
 .../AbstractAtomicGeoShapeShapeFieldData.java |  55 +++-
 .../BinaryDenseVectorScriptDocValues.java     |  70 +++--
 .../query/DenseVectorScriptDocValues.java     |  25 +-
 .../query/KnnDenseVectorScriptDocValues.java  |  73 +++--
 .../xpack/vectors/query/ScoreScriptUtils.java |   2 +-
 .../vectors/query/VectorDVLeafFieldData.java  |  29 +-
 ...BinaryDenseVectorScriptDocValuesTests.java |  25 +-
 .../query/DenseVectorFunctionTests.java       |   7 +-
 .../KnnDenseVectorScriptDocValuesTests.java   |  21 +-
 44 files changed, 666 insertions(+), 357 deletions(-)
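
For orientation, a minimal sketch of the resulting access pattern. This is
illustrative only, not code from this change: `firstValue`, the field name,
and the `leafFieldData`/`docId` inputs are hypothetical, while
getScriptField, setNextDocId, getScriptDocValues, and the supplier
delegation are the pieces shown in the diffs below:

    import java.io.IOException;

    static Object firstValue(LeafFieldData leafFieldData, int docId) throws IOException {
        // new-style entry point: the field owns the per-document iteration state
        DocValuesField field = leafFieldData.getScriptField("some_field");
        field.setNextDocId(docId); // only fields/suppliers advance the doc id now
        // old-style "doc" view: a thin wrapper that reads through its supplier
        ScriptDocValues<?> doc = field.getScriptDocValues();
        return doc.isEmpty() ? null : doc.get(0); // get() delegates to the supplier's getInternal()
    }

diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
index 0b2040c10e3ed..7589d196fd5a1 100644
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java
+++ 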
b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -269,7 +270,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new ScaledFloatIndexFieldData( scaledValues, scalingFactor, - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); }; } diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java index bdbb251e1e478..dafc303dae601 100644 --- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java +++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; @@ -83,7 +84,10 @@ public Murmur3FieldMapper build(MapperBuilderContext context) { // this only exists so a check can be done to match the field type to using murmur3 hashing... 
public static class Murmur3FieldType extends MappedFieldType { - public static final ToScriptField TO_SCRIPT_FIELD = (dv, n) -> new DelegateDocValuesField(new Longs(dv), n); + public static final ToScriptField TO_SCRIPT_FIELD = (dv, n) -> new DelegateDocValuesField( + new Longs(new LongsSupplier(dv)), + n + ); private Murmur3FieldType(String name, boolean isStored, Map meta) { super(name, false, isStored, true, TextSearchInfo.NONE, meta); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java index 6075d5db84106..a0a34ed610288 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/IpScriptFieldData.java @@ -14,6 +14,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.InetAddresses; +import org.elasticsearch.index.fielddata.ScriptDocValues.Strings; +import org.elasticsearch.index.fielddata.ScriptDocValues.StringsSupplier; import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.IpFieldScript; @@ -53,7 +55,7 @@ public BinaryScriptLeafFieldData loadDirect(LeafReaderContext context) throws Ex return new BinaryScriptLeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new IpScriptDocValues(getBytesValues()), name); + return new DelegateDocValuesField(new Strings(new IpSupplier(getBytesValues())), name); } @Override @@ -69,18 +71,19 @@ public ValuesSourceType getValuesSourceType() { } /** - * Doc values implementation for ips. We can't share + * Doc values supplier implementation for ips. We can't share * {@link IpFieldMapper.IpFieldType.IpScriptDocValues} because it is based * on global ordinals and we don't have those. */ - public static class IpScriptDocValues extends ScriptDocValues.Strings { - public IpScriptDocValues(SortedBinaryDocValues in) { + public static class IpSupplier extends StringsSupplier { + + public IpSupplier(SortedBinaryDocValues in) { super(in); } @Override - protected String bytesToString(BytesRef bytes) { - InetAddress addr = InetAddressPoint.decode(BytesReference.toBytes(new BytesArray(bytes))); + protected String bytesToString(BytesRef bytesRef) { + InetAddress addr = InetAddressPoint.decode(BytesReference.toBytes(new BytesArray(bytesRef))); return InetAddresses.toAddrString(addr); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 9003a32db09f0..b0a769800825c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -17,8 +17,7 @@ import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.geometry.utils.Geohash; -import org.elasticsearch.script.field.BinaryDocValuesField; -import org.elasticsearch.script.field.BooleanDocValuesField; +import org.elasticsearch.script.field.DocValuesField; import java.io.IOException; import java.time.Instant; @@ -39,9 +38,30 @@ public abstract class ScriptDocValues extends AbstractList { /** - * Set the current doc ID. 
+ * Supplies values to different ScriptDocValues as we + * convert them to wrappers around {@link DocValuesField}. + * This allows for different {@link DocValuesField} to implement + * this supplier class in many-to-one relationship since + * {@link DocValuesField} are more specific where + * ({byte, short, int, long, _version, murmur3, etc.} -> {long}) */ - public abstract void setNextDocId(int docId) throws IOException; + public interface Supplier { + void setNextDocId(int docId) throws IOException; + + T getInternal(int index); + + int size(); + } + + protected final Supplier supplier; + + public ScriptDocValues(Supplier supplier) { + this.supplier = supplier; + } + + public Supplier getSupplier() { + return supplier; + } // Throw meaningful exceptions if someone tries to modify the ScriptDocValues. @Override @@ -77,15 +97,13 @@ protected void throwIfEmpty() { } } - public static final class Longs extends ScriptDocValues { + public static class LongsSupplier implements Supplier { + private final SortedNumericDocValues in; private long[] values = new long[0]; private int count; - /** - * Standard constructor. - */ - public Longs(SortedNumericDocValues in) { + public LongsSupplier(SortedNumericDocValues in) { this.in = in; } @@ -105,11 +123,28 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. */ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; values = ArrayUtil.grow(values, count); } + @Override + public Long getInternal(int index) { + return values[index]; + } + + @Override + public int size() { + return count; + } + } + + public static class Longs extends ScriptDocValues { + + public Longs(Supplier supplier) { + super(supplier); + } + public long getValue() { return get(0); } @@ -117,16 +152,16 @@ public long getValue() { @Override public Long get(int index) { throwIfEmpty(); - return values[index]; + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } - public static final class Dates extends ScriptDocValues { + public static class DatesSupplier implements Supplier { private final SortedNumericDocValues in; private final boolean isNanos; @@ -137,32 +172,13 @@ public static final class Dates extends ScriptDocValues { private ZonedDateTime[] dates; private int count; - public Dates(SortedNumericDocValues in, boolean isNanos) { + public DatesSupplier(SortedNumericDocValues in, boolean isNanos) { this.in = in; this.isNanos = isNanos; } - /** - * Fetch the first field value or 0 millis after epoch if there are no - * in. - */ - public ZonedDateTime getValue() { - return get(0); - } - @Override - public ZonedDateTime get(int index) { - if (count == 0) { - throw new IllegalStateException( - "A document doesn't have a value for a field! " - + "Use doc[].size()==0 to check if a document is missing a field!" - ); - } - if (index >= count) { - throw new IndexOutOfBoundsException( - "attempted to fetch the [" + index + "] date when there are only [" + count + "] dates." - ); - } + public ZonedDateTime getInternal(int index) { return dates[index]; } @@ -184,7 +200,7 @@ public void setNextDocId(int docId) throws IOException { /** * Refresh the backing array. Package private so it can be called when {@link Longs} loads dates. 
*/ - void refreshArray() throws IOException { + private void refreshArray() throws IOException { if (count == 0) { return; } @@ -202,13 +218,49 @@ void refreshArray() throws IOException { } } - public static final class Doubles extends ScriptDocValues { + public static class Dates extends ScriptDocValues { + + public Dates(Supplier supplier) { + super(supplier); + } + + /** + * Fetch the first field value or 0 millis after epoch if there are no + * in. + */ + public ZonedDateTime getValue() { + return get(0); + } + + @Override + public ZonedDateTime get(int index) { + if (supplier.size() == 0) { + throw new IllegalStateException( + "A document doesn't have a value for a field! " + + "Use doc[].size()==0 to check if a document is missing a field!" + ); + } + if (index >= supplier.size()) { + throw new IndexOutOfBoundsException( + "attempted to fetch the [" + index + "] date when there are only [" + supplier.size() + "] dates." + ); + } + return supplier.getInternal(index); + } + + @Override + public int size() { + return supplier.size(); + } + } + + public static class DoublesSupplier implements Supplier { private final SortedNumericDoubleValues in; private double[] values = new double[0]; private int count; - public Doubles(SortedNumericDoubleValues in) { + public DoublesSupplier(SortedNumericDoubleValues in) { this.in = in; } @@ -228,13 +280,26 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. */ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; values = ArrayUtil.grow(values, count); } - public SortedNumericDoubleValues getInternalValues() { - return this.in; + @Override + public Double getInternal(int index) { + return values[index]; + } + + @Override + public int size() { + return count; + } + } + + public static class Doubles extends ScriptDocValues { + + public Doubles(Supplier supplier) { + super(supplier); } public double getValue() { @@ -243,22 +308,27 @@ public double getValue() { @Override public Double get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" 
); } - return values[index]; + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } public abstract static class Geometry extends ScriptDocValues { + + public Geometry(Supplier supplier) { + super(supplier); + } + /** Returns the dimensional type of this geometry */ public abstract int getDimensionalType(); @@ -275,7 +345,14 @@ public abstract static class Geometry extends ScriptDocValues { public abstract double getMercatorHeight(); } - public static final class GeoPoints extends Geometry { + public interface GeometrySupplier extends Supplier { + + GeoPoint getCentroid(); + + GeoBoundingBox getBoundingBox(); + } + + public static class GeoPointsSupplier implements GeometrySupplier { private final MultiGeoPointValues in; private GeoPoint[] values = new GeoPoint[0]; @@ -283,7 +360,7 @@ public static final class GeoPoints extends Geometry { private final GeoBoundingBox boundingBox = new GeoBoundingBox(new GeoPoint(), new GeoPoint()); private int count; - public GeoPoints(MultiGeoPointValues in) { + public GeoPointsSupplier(MultiGeoPointValues in) { this.in = in; } @@ -335,7 +412,7 @@ private void setMultiValue() throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. */ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; if (newSize > values.length) { int oldLength = values.length; @@ -346,6 +423,36 @@ protected void resize(int newSize) { } } + @Override + public GeoPoint getInternal(int index) { + return values[index]; + } + + @Override + public GeoPoint getCentroid() { + return centroid; + } + + @Override + public GeoBoundingBox getBoundingBox() { + return boundingBox; + } + + @Override + public int size() { + return count; + } + } + + public static class GeoPoints extends Geometry { + + private final GeometrySupplier geometrySupplier; + + public GeoPoints(GeometrySupplier supplier) { + super(supplier); + geometrySupplier = supplier; + } + public GeoPoint getValue() { return get(0); } @@ -376,19 +483,19 @@ public double getLon() { @Override public GeoPoint get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - final GeoPoint point = values[index]; + final GeoPoint point = supplier.getInternal(index); return new GeoPoint(point.lat(), point.lon()); } @Override public int size() { - return count; + return supplier.size(); } public double arcDistance(double lat, double lon) { @@ -434,7 +541,7 @@ public int getDimensionalType() { @Override public GeoPoint getCentroid() { - return size() == 0 ? null : centroid; + return size() == 0 ? null : geometrySupplier.getCentroid(); } @Override @@ -449,21 +556,14 @@ public double getMercatorHeight() { @Override public GeoBoundingBox getBoundingBox() { - return size() == 0 ? null : boundingBox; + return size() == 0 ? 
null : geometrySupplier.getBoundingBox(); } } - public static final class Booleans extends ScriptDocValues { - - private final BooleanDocValuesField booleanDocValuesField; + public static class Booleans extends ScriptDocValues { - public Booleans(BooleanDocValuesField booleanDocValuesField) { - this.booleanDocValuesField = booleanDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public Booleans(Supplier supplier) { + super(supplier); } public boolean getValue() { @@ -474,22 +574,22 @@ public boolean getValue() { @Override public Boolean get(int index) { throwIfEmpty(); - return booleanDocValuesField.getInternal(index); + return supplier.getInternal(index); } @Override public int size() { - return booleanDocValuesField.size(); + return supplier.size(); } } - abstract static class BinaryScriptDocValues extends ScriptDocValues { + public static class StringsSupplier implements Supplier { private final SortedBinaryDocValues in; - protected BytesRefBuilder[] values = new BytesRefBuilder[0]; - protected int count; + private BytesRefBuilder[] values = new BytesRefBuilder[0]; + private int count; - BinaryScriptDocValues(SortedBinaryDocValues in) { + public StringsSupplier(SortedBinaryDocValues in) { this.in = in; } @@ -512,7 +612,7 @@ public void setNextDocId(int docId) throws IOException { * Set the {@link #size()} and ensure that the {@link #values} array can * store at least that many entries. */ - protected void resize(int newSize) { + private void resize(int newSize) { count = newSize; if (newSize > values.length) { final int oldLength = values.length; @@ -523,51 +623,52 @@ protected void resize(int newSize) { } } + protected String bytesToString(BytesRef bytesRef) { + return bytesRef.utf8ToString(); + } + + @Override + public String getInternal(int index) { + return bytesToString(values[index].toBytesRef()); + } + @Override public int size() { return count; } } - public static class Strings extends BinaryScriptDocValues { - public Strings(SortedBinaryDocValues in) { - super(in); + public static class Strings extends ScriptDocValues { + + public Strings(Supplier supplier) { + super(supplier); + } + + public String getValue() { + return get(0); } @Override - public final String get(int index) { - if (count == 0) { + public String get(int index) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - return bytesToString(values[index].get()); - } - - /** - * Convert the stored bytes to a String. 
- */ - protected String bytesToString(BytesRef bytes) { - return bytes.utf8ToString(); + return supplier.getInternal(index); } - public final String getValue() { - return get(0); + @Override + public int size() { + return supplier.size(); } } public static final class BytesRefs extends ScriptDocValues { - private final BinaryDocValuesField binaryDocValuesField; - - public BytesRefs(BinaryDocValuesField binaryDocValuesField) { - this.binaryDocValuesField = binaryDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public BytesRefs(Supplier supplier) { + super(supplier); } public BytesRef getValue() { @@ -578,12 +679,12 @@ public BytesRef getValue() { @Override public BytesRef get(int index) { throwIfEmpty(); - return binaryDocValuesField.getInternal(index); + return supplier.getInternal(index); } @Override public int size() { - return binaryDocValuesField.size(); + return supplier.size(); } } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java index 641801ad32a5e..5b7486352a7f9 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/StringScriptFieldData.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.fielddata; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.index.fielddata.ScriptDocValues.StringsSupplier; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.script.StringFieldScript; import org.elasticsearch.script.field.DelegateDocValuesField; @@ -45,7 +46,7 @@ public BinaryScriptLeafFieldData loadDirect(LeafReaderContext context) throws Ex return new BinaryScriptLeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java index fd5018dff3bae..b115c57cd191d 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafGeoPointFieldData.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.fielddata.LeafGeoPointFieldData; import org.elasticsearch.index.fielddata.MultiGeoPointValues; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPointsSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; @@ -28,7 +29,7 @@ public final SortedBinaryDocValues getBytesValues() { @Override public final DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.GeoPoints(getGeoPointValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.GeoPoints(new GeoPointsSupplier(getGeoPointValues())), name); } public static LeafGeoPointFieldData empty(final int maxDoc) { diff --git 
a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java index ff479cde06669..0303db063455c 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AbstractLeafOrdinalsFieldData.java @@ -26,7 +26,7 @@ public abstract class AbstractLeafOrdinalsFieldData implements LeafOrdinalsField public static final Function> DEFAULT_SCRIPT_FUNCTION = ((Function< SortedSetDocValues, - SortedBinaryDocValues>) FieldData::toString).andThen(ScriptDocValues.Strings::new); + SortedBinaryDocValues>) FieldData::toString).andThen(ScriptDocValues.StringsSupplier::new).andThen(ScriptDocValues.Strings::new); private final Function> scriptFunction; diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java index b7db7ba8ee54a..5e245a5de6c8f 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/BinaryDVLeafFieldData.java @@ -46,7 +46,7 @@ public SortedBinaryDocValues getBytesValues() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java index d1861a23c16e7..bbd5dd486e034 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/StringBinaryDVLeafFieldData.java @@ -20,6 +20,6 @@ final class StringBinaryDVLeafFieldData extends AbstractBinaryDVLeafFieldData { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index fa9d406f2ad55..71fca6ccec7e0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Dates; +import org.elasticsearch.index.fielddata.ScriptDocValues.DatesSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.DateRangeIncludingNowQuery; import org.elasticsearch.index.query.QueryRewriteContext; @@ -80,7 +81,7 @@ public final class DateFieldMapper extends FieldMapper { private static final DateMathParser EPOCH_MILLIS_PARSER = DateFormatter.forPattern("epoch_millis").toDateMathParser(); public enum 
Resolution { - MILLISECONDS(CONTENT_TYPE, NumericType.DATE, (dv, n) -> new DelegateDocValuesField(new Dates(dv, false), n)) { + MILLISECONDS(CONTENT_TYPE, NumericType.DATE, (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, false)), n)) { @Override public long convert(Instant instant) { return instant.toEpochMilli(); @@ -111,7 +112,11 @@ protected Query distanceFeatureQuery(String field, float boost, long origin, Tim return LongPoint.newDistanceFeatureQuery(field, boost, origin, pivot.getMillis()); } }, - NANOSECONDS(DATE_NANOS_CONTENT_TYPE, NumericType.DATE_NANOSECONDS, (dv, n) -> new DelegateDocValuesField(new Dates(dv, true), n)) { + NANOSECONDS( + DATE_NANOS_CONTENT_TYPE, + NumericType.DATE_NANOSECONDS, + (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, true)), n) + ) { @Override public long convert(Instant instant) { return toLong(instant); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java index c28a4c4de9727..9979124c2d7e5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DoubleScriptFieldType.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.fielddata.DoubleScriptFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.CompositeFieldScript; @@ -103,7 +104,7 @@ public DoubleScriptFieldData.Builder fielddataBuilder(String fullyQualifiedIndex return new DoubleScriptFieldData.Builder( name(), leafFactory(searchLookup.get()), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java index df0c2ae158a13..13f672407ef67 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IdFieldMapper.java @@ -220,7 +220,7 @@ public long ramBytesUsed() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues.Strings(getBytesValues()), name); + return new DelegateDocValuesField(new ScriptDocValues.Strings(new ScriptDocValues.StringsSupplier(getBytesValues())), name); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index b1497c8e988dc..f52baf430783d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.plain.SortedSetOrdinalsIndexFieldData; +import org.elasticsearch.index.mapper.IpFieldMapper.IpFieldType.IpScriptDocValues.IpSupplier; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.IpFieldScript; import 
org.elasticsearch.script.Script; @@ -351,27 +352,52 @@ public static Query rangeQuery( public static final class IpScriptDocValues extends ScriptDocValues { - private final SortedSetDocValues in; - private long[] ords = new long[0]; - private int count; + public static final class IpSupplier implements ScriptDocValues.Supplier { - public IpScriptDocValues(SortedSetDocValues in) { - this.in = in; - } + private final SortedSetDocValues in; + private long[] ords = new long[0]; + private int count; - @Override - public void setNextDocId(int docId) throws IOException { - count = 0; - if (in.advanceExact(docId)) { - for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { - ords = ArrayUtil.grow(ords, count + 1); - ords[count++] = ord; + public IpSupplier(SortedSetDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + count = 0; + if (in.advanceExact(docId)) { + for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { + ords = ArrayUtil.grow(ords, count + 1); + ords[count++] = ord; + } + } + } + + @Override + public String getInternal(int index) { + try { + BytesRef encoded = in.lookupOrd(ords[index]); + InetAddress address = InetAddressPoint.decode( + Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) + ); + return InetAddresses.toAddrString(address); + } catch (IOException e) { + throw new RuntimeException(e); } } + + @Override + public int size() { + return count; + } + } + + public IpScriptDocValues(IpSupplier supplier) { + super(supplier); } public String getValue() { - if (count == 0) { + if (supplier.size() == 0) { return null; } else { return get(0); @@ -380,27 +406,23 @@ public String getValue() { @Override public String get(int index) { - try { - BytesRef encoded = in.lookupOrd(ords[index]); - InetAddress address = InetAddressPoint.decode( - Arrays.copyOfRange(encoded.bytes, encoded.offset, encoded.offset + encoded.length) - ); - return InetAddresses.toAddrString(address); - } catch (IOException e) { - throw new RuntimeException(e); - } + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { failIfNoDocValues(); - return new SortedSetOrdinalsIndexFieldData.Builder(name(), IpScriptDocValues::new, CoreValuesSourceType.IP); + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + s -> new IpScriptDocValues(new IpSupplier(s)), + CoreValuesSourceType.IP + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java index 03abc9c7c0ae9..4791eb5398ff5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/LongScriptFieldType.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.fielddata.LongScriptFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.CompositeFieldScript; @@ -98,7 +99,7 @@ public LongScriptFieldData.Builder 
fielddataBuilder(String fullyQualifiedIndexNa return new LongScriptFieldData.Builder( name(), leafFactory(searchLookup.get()), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index b8253c76ceac2..075d475029930 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -36,7 +36,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedDoublesIndexFieldData; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.TimeSeriesParams.MetricType; @@ -335,7 +337,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -446,7 +448,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -540,7 +542,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedDoublesIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); } @@ -621,7 +623,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -692,7 +694,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -822,7 +824,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }, @@ -922,7 +924,7 @@ public IndexFieldData.Builder getFieldDataBuilder(String name) { return new SortedNumericIndexFieldData.Builder( name, numericType(), - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } }; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java index 
a30aa20d0d1e7..e21186ca678bd 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SeqNoFieldMapper.java @@ -18,6 +18,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -188,7 +189,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new SortedNumericIndexFieldData.Builder( name(), NumericType.LONG, - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java index 404c0ca7d7cb3..aa82c3371c508 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/VersionFieldMapper.java @@ -14,6 +14,7 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.query.SearchExecutionContext; @@ -62,7 +63,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S return new SortedNumericIndexFieldData.Builder( name(), NumericType.LONG, - (dv, n) -> new DelegateDocValuesField(new Longs(dv), n) + (dv, n) -> new DelegateDocValuesField(new Longs(new LongsSupplier(dv)), n) ); } } diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index 51d81c87fcddb..6837281bf1523 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -57,7 +57,7 @@ public RandomScoreField(ScoreScript scoreScript, int seed, String fieldName) { public double randomScore() { try { - docValues.setNextDocId(scoreScript._getDocId()); + docValues.getSupplier().setNextDocId(scoreScript._getDocId()); String seedValue = String.valueOf(docValues.get(0)); int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); return (hash & 0x00FFFFFF) / (float) (1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 diff --git a/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java index 565afea2b3ade..8db429a13639d 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BinaryDocValuesField.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -public class BinaryDocValuesField implements DocValuesField { +public class 
BinaryDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedBinaryDocValues input; private final String name; @@ -74,8 +74,9 @@ public ScriptDocValues getScriptDocValues() { return bytesRefs; } - // this method is required to support the ByteRef return values + // this method is required to support the BytesRef return values // for the old-style "doc" access in ScriptDocValues + @Override public BytesRef getInternal(int index) { return values[index].toBytesRef(); } diff --git a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java index 6eed4eef37e3a..8dd3674feb470 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BooleanDocValuesField.java @@ -17,7 +17,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -public class BooleanDocValuesField implements DocValuesField { +public class BooleanDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedNumericDocValues input; private final String name; @@ -25,6 +25,8 @@ public class BooleanDocValuesField implements DocValuesField { private boolean[] values = new boolean[0]; private int count; + // used for backwards compatibility for old-style "doc" access + // as a delegate to this field class private ScriptDocValues.Booleans booleans = null; public BooleanDocValuesField(SortedNumericDocValues input, String name) { @@ -32,11 +34,6 @@ public BooleanDocValuesField(SortedNumericDocValues input, String name) { this.name = name; } - /** - * Set the current document ID. - * - * @param docId - */ @Override public void setNextDocId(int docId) throws IOException { if (input.advanceExact(docId)) { @@ -58,11 +55,6 @@ private void resize(int newSize) { } } - /** - * Returns a {@code ScriptDocValues} of the appropriate type for this field. - * This is used to support backwards compatibility for accessing field values - * through the {@code doc} variable. - */ @Override public ScriptDocValues getScriptDocValues() { if (booleans == null) { @@ -72,35 +64,40 @@ public ScriptDocValues getScriptDocValues() { return booleans; } - /** - * Returns the name of this field. - */ + // this method is required to support the Boolean return values + // for the old-style "doc" access in ScriptDocValues + @Override + public Boolean getInternal(int index) { + return values[index]; + } + @Override public String getName() { return name; } - /** - * Returns {@code true} if this field has no values, otherwise {@code false}. - */ @Override public boolean isEmpty() { return count == 0; } - /** - * Returns the number of values this field has. - */ @Override public int size() { return count; } - /** - * Returns an iterator over elements of type {@code T}. - * - * @return an Iterator.
- */ + public boolean get(boolean defaultValue) { + return get(0, defaultValue); + } + + public boolean get(int index, boolean defaultValue) { + if (isEmpty() || index < 0 || index >= count) { + return defaultValue; + } + + return values[index]; + } + @Override public Iterator iterator() { return new Iterator() { @@ -120,21 +117,4 @@ public Boolean next() { } }; } - - public boolean get(boolean defaultValue) { - return get(0, defaultValue); - } - - public boolean get(int index, boolean defaultValue) { - if (isEmpty() || index < 0 || index >= count) { - return defaultValue; - } - - return values[index]; - } - - // this method is required to support the old-style "doc" access in ScriptDocValues - public boolean getInternal(int index) { - return values[index]; - } } diff --git a/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java index b90920f3fec1f..db0dbe5d07a5c 100644 --- a/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/DelegateDocValuesField.java @@ -23,13 +23,16 @@ public class DelegateDocValuesField implements DocValuesField { private final String name; public DelegateDocValuesField(ScriptDocValues scriptDocValues, String name) { + // Suppliers provided via ScriptDocValues should never be a DocValuesField + // as we expect DelegateDocValuesField to only support old-style ScriptDocValues + assert scriptDocValues.getSupplier() instanceof DocValuesField == false; this.scriptDocValues = scriptDocValues; this.name = name; } @Override public void setNextDocId(int docId) throws IOException { - scriptDocValues.setNextDocId(docId); + scriptDocValues.getSupplier().setNextDocId(docId); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java index 0428804bc08fc..3d15faa3146bd 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesGeoPointsTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPoints; +import org.elasticsearch.index.fielddata.ScriptDocValues.GeoPointsSupplier; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -62,11 +63,11 @@ public void testGeoGetLatLon() throws IOException { GeoPoint[][] points = { { new GeoPoint(lat1, lon1), new GeoPoint(lat2, lon2) } }; final MultiGeoPointValues values = wrap(points); - final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(values); + final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(values)); - script.setNextDocId(1); + script.getSupplier().setNextDocId(1); assertEquals(true, script.isEmpty()); - script.setNextDocId(0); + script.getSupplier().setNextDocId(0); assertEquals(false, script.isEmpty()); assertEquals(new GeoPoint(lat1, lon1), script.getValue()); assertEquals(lat1, script.getLat(), 0); @@ -80,12 +81,12 @@ public void testGeoDistance() throws IOException { final double lon = randomLon(); GeoPoint[][] points = { { new GeoPoint(lat, lon) } }; final MultiGeoPointValues values = wrap(points); - final ScriptDocValues.GeoPoints script = new 
ScriptDocValues.GeoPoints(values); - script.setNextDocId(0); + final ScriptDocValues.GeoPoints script = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(values)); + script.getSupplier().setNextDocId(0); GeoPoint[][] points2 = { new GeoPoint[0] }; - final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(wrap(points2)); - emptyScript.setNextDocId(0); + final ScriptDocValues.GeoPoints emptyScript = new ScriptDocValues.GeoPoints(new GeoPointsSupplier(wrap(points2))); + emptyScript.getSupplier().setNextDocId(0); final double otherLat = randomLat(); final double otherLon = randomLon(); @@ -115,9 +116,9 @@ public void testMissingValues() throws IOException { points[d][i] = new GeoPoint(randomLat(), randomLon()); } } - final ScriptDocValues.GeoPoints geoPoints = new GeoPoints(wrap(points)); + final ScriptDocValues.GeoPoints geoPoints = new GeoPoints(new GeoPointsSupplier(wrap(points))); for (int d = 0; d < points.length; d++) { - geoPoints.setNextDocId(d); + geoPoints.getSupplier().setNextDocId(d); if (points[d].length > 0) { assertEquals(points[d][0], geoPoints.getValue()); } else { diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java index 0e8fe7772fbe7..e2460614e275a 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesLongsTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.fielddata; import org.elasticsearch.index.fielddata.ScriptDocValues.Longs; +import org.elasticsearch.index.fielddata.ScriptDocValues.LongsSupplier; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -27,7 +28,7 @@ public void testLongs() throws IOException { for (int round = 0; round < 10; round++) { int d = between(0, values.length - 1); - longs.setNextDocId(d); + longs.getSupplier().setNextDocId(d); if (values[d].length > 0) { assertEquals(values[d][0], longs.getValue()); assertEquals(values[d][0], (long) longs.get(0)); @@ -56,7 +57,7 @@ public void testLongs() throws IOException { } private Longs wrap(long[][] values) { - return new Longs(new AbstractSortedNumericDocValues() { + return new Longs(new LongsSupplier(new AbstractSortedNumericDocValues() { long[] current; int i; @@ -76,6 +77,6 @@ public int docValueCount() { public long nextValue() { return current[i++]; } - }); + })); } } diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java index 0860702068a6e..9790ac29f457d 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/plain/HalfFloatFielddataTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.script.field.DelegateDocValuesField; @@ -42,7 +43,7 @@ public void testSingleValued() throws IOException { SortedNumericDoubleValues values = new SortedDoublesIndexFieldData.SortedNumericHalfFloatFieldData( 
reader, "half_float", - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ).getDoubleValues(); assertNotNull(FieldData.unwrapSingleton(values)); assertTrue(values.advanceExact(0)); @@ -67,7 +68,7 @@ public void testMultiValued() throws IOException { SortedNumericDoubleValues values = new SortedDoublesIndexFieldData.SortedNumericHalfFloatFieldData( reader, "half_float", - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ).getDoubleValues(); assertNull(FieldData.unwrapSingleton(values)); assertTrue(values.advanceExact(0)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 3547b6b89925d..1a49f2aea0f4c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.LeafNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Dates; +import org.elasticsearch.index.fielddata.ScriptDocValues.DatesSupplier; import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.DateFieldMapper.DateFieldType; import org.elasticsearch.index.mapper.DateFieldMapper.Resolution; @@ -338,7 +339,7 @@ public void testDateNanoDocValues() throws IOException { SortedNumericIndexFieldData fieldData = new SortedNumericIndexFieldData( "my_date", IndexNumericFieldData.NumericType.DATE_NANOSECONDS, - (dv, n) -> new DelegateDocValuesField(new Dates(dv, true), n) + (dv, n) -> new DelegateDocValuesField(new Dates(new DatesSupplier(dv, true)), n) ); // Read index and check the doc values DirectoryReader reader = DirectoryReader.open(w); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java index 76cdfd6fff7b4..ef4a863b45a3b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IpScriptFieldTypeTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.lucene.search.function.ScriptScoreQuery; import org.elasticsearch.index.fielddata.BinaryScriptFieldData; -import org.elasticsearch.index.fielddata.IpScriptFieldData; +import org.elasticsearch.index.fielddata.ScriptDocValues.Strings; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.DocReader; @@ -145,7 +145,7 @@ public ScoreScript newInstance(DocReader docReader) { return new ScoreScript(Map.of(), searchContext.lookup(), docReader) { @Override public double execute(ExplanationHolder explanation) { - IpScriptFieldData.IpScriptDocValues bytes = (IpScriptFieldData.IpScriptDocValues) getDoc().get("test"); + Strings bytes = (Strings) getDoc().get("test"); return Integer.parseInt(bytes.getValue().substring(bytes.getValue().lastIndexOf(".") + 1)); } }; diff --git a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java index 
262b7cc062a82..f123deb7f8d8a 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SearchExecutionContextTests.java @@ -500,7 +500,7 @@ public LeafFieldData load(LeafReaderContext context) { return new LeafFieldData() { @Override public DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new ScriptDocValues() { + return new DelegateDocValuesField(new ScriptDocValues(new ScriptDocValues.Supplier() { String value; @Override @@ -509,7 +509,7 @@ public int size() { } @Override - public String get(int index) { + public String getInternal(int index) { assert index == 0; return value; } @@ -521,6 +521,16 @@ public void setNextDocId(int docId) { leafLookup.setDocument(docId); value = runtimeDocValues.apply(leafLookup, docId); } + }) { + @Override + public int size() { + return supplier.size(); + } + + @Override + public String get(int i) { + return supplier.getInternal(i); + } }, name); } @@ -616,7 +626,7 @@ public void collect(int doc) throws IOException { scriptDocValues = indexFieldData.load(context).getScriptField("test").getScriptDocValues(); ; } - scriptDocValues.setNextDocId(doc); + scriptDocValues.getSupplier().setNextDocId(doc); for (int i = 0; i < scriptDocValues.size(); i++) { result.add(scriptDocValues.get(i).toString()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java index d50922274ea74..8b2aa19546e47 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedSamplerTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues.Doubles; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.plain.SortedDoublesIndexFieldData; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -167,7 +168,7 @@ private void testCase( SortedDoublesIndexFieldData fieldData = new SortedDoublesIndexFieldData( "price", IndexNumericFieldData.NumericType.DOUBLE, - (dv, n) -> new DelegateDocValuesField(new Doubles(dv), n) + (dv, n) -> new DelegateDocValuesField(new Doubles(new DoublesSupplier(dv)), n) ); FunctionScoreQuery query = new FunctionScoreQuery( new MatchAllDocsQuery(), diff --git a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java index f642d1a41a904..4b85905fdc423 100644 --- a/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java +++ b/server/src/test/java/org/elasticsearch/search/lookup/LeafDocLookupTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import java.io.IOException; import java.util.function.Function; import static org.mockito.AdditionalAnswers.returnsFirstArg; @@ -64,7 +65,7 @@ public void testFieldAliases() { assertEquals(docValues, fetchedDocValues); } - public void testFlattenedField() { + public void testFlattenedField() throws IOException { ScriptDocValues docValues1 = 
mock(ScriptDocValues.class); IndexFieldData fieldData1 = createFieldData(docValues1, "flattened.key1"); @@ -95,8 +96,13 @@ public void testFlattenedField() { assertEquals(docValues2, docLookup.get("flattened.key2")); } - private IndexFieldData createFieldData(ScriptDocValues scriptDocValues, String name) { - DelegateDocValuesField delegateDocValuesField = new DelegateDocValuesField(scriptDocValues, name); + private IndexFieldData createFieldData(ScriptDocValues scriptDocValues, String name) throws IOException { + DelegateDocValuesField delegateDocValuesField = new DelegateDocValuesField(scriptDocValues, name) { + @Override + public void setNextDocId(int id) { + // do nothing + } + }; LeafFieldData leafFieldData = mock(LeafFieldData.class); doReturn(delegateDocValuesField).when(leafFieldData).getScriptField(name); diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 995e5f70de845..cbb65425f942c 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; +import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.mapper.DocumentParserContext; @@ -423,7 +424,10 @@ public double nextValue() throws IOException { @Override public DocValuesField getScriptField(String name) { // getAggregateMetricValues returns all metric as doubles, including `value_count` - return new DelegateDocValuesField(new ScriptDocValues.Doubles(getAggregateMetricValues(defaultMetric)), name); + return new DelegateDocValuesField( + new ScriptDocValues.Doubles(new DoublesSupplier(getAggregateMetricValues(defaultMetric))), + name + ); } @Override diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java index 3633b423bdc72..e2f792981e369 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongDocValuesField.java @@ -23,7 +23,7 @@ import static org.elasticsearch.search.DocValueFormat.MASK_2_63; import static org.elasticsearch.xpack.unsignedlong.UnsignedLongFieldMapper.BIGINTEGER_2_64_MINUS_ONE; -public class UnsignedLongDocValuesField implements UnsignedLongField, DocValuesField { +public class UnsignedLongDocValuesField implements DocValuesField, ScriptDocValues.Supplier { private final SortedNumericDocValues input; private final String name; @@ -76,6 +76,13 @@ public boolean isEmpty() { return count == 0; } + // this method is required to support the Long return values + // for the old-style "doc" access in ScriptDocValues + @Override + 
public Long getInternal(int index) { + return toFormatted(index); + } + @Override public int size() { return count; @@ -89,7 +96,7 @@ protected long toFormatted(int index) { return values[index] ^ MASK_2_63; } - @Override + /** Return all the values as a {@code List}. */ public List getValues() { if (isEmpty()) { return Collections.emptyList(); @@ -104,13 +111,13 @@ public List getValues() { return values; } - @Override - public long getValue(long defaultValue) { + /** Returns the 0th index value as a {@code long} if it exists, otherwise {@code defaultValue}. */ + public long get(long defaultValue) { return getValue(0, defaultValue); } - @Override - public long getValue(int index, long defaultValue) { + /** Returns the value at {@code index} as a {@code long} if it exists, otherwise {@code defaultValue}. */ + public long get(int index, long defaultValue) { if (isEmpty() || index < 0 || index >= count) { return defaultValue; @@ -118,6 +125,16 @@ public long getValue(int index, long defaultValue) { return toFormatted(index); } + /** Returns the 0th index value as a {@code long} if it exists, otherwise {@code defaultValue}. */ + public long getValue(long defaultValue) { + return get(0, defaultValue); + } + + /** Returns the value at {@code index} as a {@code long} if it exists, otherwise {@code defaultValue}. */ + public long getValue(int index, long defaultValue) { + return get(index, defaultValue); + } + @Override public PrimitiveIterator.OfLong iterator() { return new PrimitiveIterator.OfLong() { @@ -148,7 +165,7 @@ protected BigInteger toBigInteger(int index) { return BigInteger.valueOf(toFormatted(index)).and(BIGINTEGER_2_64_MINUS_ONE); } - @Override + /** Converts all the values to {@code BigInteger} and returns them as a {@code List}. */ public List asBigIntegers() { if (isEmpty()) { return Collections.emptyList(); @@ -163,12 +180,12 @@ public List asBigIntegers() { return values; } - @Override + /** Returns the 0th index value as a {@code BigInteger} if it exists, otherwise {@code defaultValue}. */ public BigInteger asBigInteger(BigInteger defaultValue) { return asBigInteger(0, defaultValue); } - @Override + /** Returns the value at {@code index} as a {@code BigInteger} if it exists, otherwise {@code defaultValue}. */ public BigInteger asBigInteger(int index, BigInteger defaultValue) { if (isEmpty() || index < 0 || index >= count) { return defaultValue; diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java index 3278e4f165338..dfc1fd23c30eb 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongScriptDocValues.java @@ -9,37 +9,24 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; -import java.io.IOException; - public class UnsignedLongScriptDocValues extends ScriptDocValues { - private final UnsignedLongDocValuesField unsignedLongDocValuesField; - - /** - * Standard constructor.
- */ - public UnsignedLongScriptDocValues(UnsignedLongDocValuesField unsignedLongDocValuesField) { - this.unsignedLongDocValuesField = unsignedLongDocValuesField; - } - - @Override - public void setNextDocId(int docId) throws IOException { - throw new UnsupportedOperationException(); + public UnsignedLongScriptDocValues(Supplier supplier) { + super(supplier); } public long getValue() { - throwIfEmpty(); - return unsignedLongDocValuesField.getValue(0L); // default is ignored + return get(0); } @Override public Long get(int index) { throwIfEmpty(); - return unsignedLongDocValuesField.getValue(0L); // default is ignored + return supplier.getInternal(index); } @Override public int size() { - return unsignedLongDocValuesField.size(); + return supplier.size(); } } diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt b/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt index 2baa2107b7472..bce3a098a69dc 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt +++ b/x-pack/plugin/mapper-unsigned-long/src/main/resources/org/elasticsearch/xpack/unsignedlong/org.elasticsearch.xpack.unsignedlong.txt @@ -11,14 +11,13 @@ class org.elasticsearch.xpack.unsignedlong.UnsignedLongScriptDocValues { long getValue() } -class org.elasticsearch.xpack.unsignedlong.UnsignedLongField @dynamic_type { +class org.elasticsearch.xpack.unsignedlong.UnsignedLongDocValuesField @dynamic_type { + long get(long) + long get(int, long) long getValue(long) long getValue(int, long) List getValues() BigInteger asBigInteger(BigInteger) BigInteger asBigInteger(int, BigInteger) List asBigIntegers() -} - -class org.elasticsearch.xpack.unsignedlong.UnsignedLongDocValuesField @dynamic_type { } \ No newline at end of file diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java index 25cd66a0eb212..f20db9bcd852e 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionScriptDocValues.java @@ -15,23 +15,44 @@ public final class VersionScriptDocValues extends ScriptDocValues { - private final SortedSetDocValues in; - private long[] ords = new long[0]; - private int count; + public static final class VersionScriptSupplier implements ScriptDocValues.Supplier { - public VersionScriptDocValues(SortedSetDocValues in) { - this.in = in; - } + private final SortedSetDocValues in; + private long[] ords = new long[0]; + private int count; - @Override - public void setNextDocId(int docId) throws IOException { - count = 0; - if (in.advanceExact(docId)) { - for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { - ords = ArrayUtil.grow(ords, count + 1); - ords[count++] = ord; + public VersionScriptSupplier(SortedSetDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + count = 0; + if (in.advanceExact(docId)) { + for (long ord = in.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = in.nextOrd()) { + ords = ArrayUtil.grow(ords, count + 1); + ords[count++] = ord; + } + } + } + + 
@Override + public String getInternal(int index) { + try { + return VersionEncoder.decodeVersion(in.lookupOrd(ords[index])); + } catch (IOException e) { + throw new RuntimeException(e); } } + + @Override + public int size() { + return count; + } + } + + public VersionScriptDocValues(VersionScriptSupplier supplier) { + super(supplier); } public String getValue() { @@ -40,20 +61,16 @@ public String getValue() { @Override public String get(int index) { - if (count == 0) { + if (supplier.size() == 0) { throw new IllegalStateException( "A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!" ); } - try { - return VersionEncoder.decodeVersion(in.lookupOrd(ords[index])); - } catch (IOException e) { - throw new RuntimeException(e); - } + return supplier.getInternal(index); } @Override public int size() { - return count; + return supplier.size(); } } diff --git a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java index fa8a692749b25..97dc728352051 100644 --- a/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java +++ b/x-pack/plugin/mapper-version/src/main/java/org/elasticsearch/xpack/versionfield/VersionStringFieldMapper.java @@ -49,6 +49,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.versionfield.VersionEncoder.EncodedVersion; +import org.elasticsearch.xpack.versionfield.VersionScriptDocValues.VersionScriptSupplier; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -279,7 +280,11 @@ protected BytesRef indexedValueForSearch(Object value) { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, Supplier searchLookup) { - return new SortedSetOrdinalsIndexFieldData.Builder(name(), VersionScriptDocValues::new, CoreValuesSourceType.KEYWORD); + return new SortedSetOrdinalsIndexFieldData.Builder( + name(), + dv -> new VersionScriptDocValues(new VersionScriptSupplier(dv)), + CoreValuesSourceType.KEYWORD + ); } @Override diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java index 69018c3ed0803..5bef8d33fb6db 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/fielddata/plain/AbstractAtomicGeoShapeShapeFieldData.java @@ -15,6 +15,7 @@ import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues; +import org.elasticsearch.xpack.spatial.index.fielddata.GeoShapeValues.GeoShapeValue; import org.elasticsearch.xpack.spatial.index.fielddata.LeafGeoShapeFieldData; import java.io.IOException; @@ -33,7 +34,7 @@ public final SortedBinaryDocValues getBytesValues() { @Override public final DocValuesField getScriptField(String name) { - return new DelegateDocValuesField(new GeoShapeScriptValues(getGeoShapeValues()), name); + return new DelegateDocValuesField(new 
GeoShapeScriptValues(new GeoShapeSupplier(getGeoShapeValues())), name); } public static LeafGeoShapeFieldData empty(final int maxDoc) { @@ -59,14 +60,14 @@ public GeoShapeValues getGeoShapeValues() { }; } - private static final class GeoShapeScriptValues extends ScriptDocValues.Geometry { + private static final class GeoShapeSupplier implements ScriptDocValues.GeometrySupplier { private final GeoShapeValues in; private final GeoPoint centroid = new GeoPoint(); private final GeoBoundingBox boundingBox = new GeoBoundingBox(new GeoPoint(), new GeoPoint()); private GeoShapeValues.GeoShapeValue value; - private GeoShapeScriptValues(GeoShapeValues in) { + private GeoShapeSupplier(GeoShapeValues in) { this.in = in; } @@ -82,39 +83,73 @@ public void setNextDocId(int docId) throws IOException { } } + @Override + public GeoShapeValue getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public GeoShapeValue getInternal() { + return value; + } + + @Override + public int size() { + return value == null ? 0 : 1; + } + + @Override + public GeoPoint getCentroid() { + return centroid; + } + + @Override + public GeoBoundingBox getBoundingBox() { + return boundingBox; + } + } + + private static final class GeoShapeScriptValues extends ScriptDocValues.Geometry { + + private final GeoShapeSupplier gsSupplier; + + private GeoShapeScriptValues(GeoShapeSupplier supplier) { + super(supplier); + this.gsSupplier = supplier; + } + @Override public int getDimensionalType() { - return value == null ? -1 : value.dimensionalShapeType().ordinal(); + return gsSupplier.getInternal() == null ? -1 : gsSupplier.getInternal().dimensionalShapeType().ordinal(); } @Override public GeoPoint getCentroid() { - return value == null ? null : centroid; + return gsSupplier.getInternal() == null ? null : gsSupplier.getCentroid(); } @Override public double getMercatorWidth() { - return lonToSphericalMercator(boundingBox.right()) - lonToSphericalMercator(boundingBox.left()); + return lonToSphericalMercator(getBoundingBox().right()) - lonToSphericalMercator(getBoundingBox().left()); } @Override public double getMercatorHeight() { - return latToSphericalMercator(boundingBox.top()) - latToSphericalMercator(boundingBox.bottom()); + return latToSphericalMercator(getBoundingBox().top()) - latToSphericalMercator(getBoundingBox().bottom()); } @Override public GeoBoundingBox getBoundingBox() { - return value == null ? null : boundingBox; + return gsSupplier.getInternal() == null ? null : gsSupplier.getBoundingBox(); } @Override public GeoShapeValues.GeoShapeValue get(int index) { - return value; + return gsSupplier.getInternal(); } @Override public int size() { - return value == null ? 
0 : 1; + return supplier.size(); } } } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java index 6df3ed449bd16..852b63500a9bf 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValues.java @@ -17,49 +17,73 @@ public class BinaryDenseVectorScriptDocValues extends DenseVectorScriptDocValues { - private final BinaryDocValues in; + public static class BinaryDenseVectorSupplier implements DenseVectorSupplier { + + private final BinaryDocValues in; + private BytesRef value; + + public BinaryDenseVectorSupplier(BinaryDocValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + if (in.advanceExact(docId)) { + value = in.binaryValue(); + } else { + value = null; + } + } + + @Override + public BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public BytesRef getInternal() { + return value; + } + + @Override + public int size() { + if (value == null) { + return 0; + } else { + return 1; + } + } + } + + private final BinaryDenseVectorSupplier bdvSupplier; private final Version indexVersion; private final float[] vector; - private BytesRef value; - BinaryDenseVectorScriptDocValues(BinaryDocValues in, Version indexVersion, int dims) { - super(dims); - this.in = in; + BinaryDenseVectorScriptDocValues(BinaryDenseVectorSupplier supplier, Version indexVersion, int dims) { + super(supplier, dims); + this.bdvSupplier = supplier; this.indexVersion = indexVersion; this.vector = new float[dims]; } @Override - public void setNextDocId(int docId) throws IOException { - if (in.advanceExact(docId)) { - value = in.binaryValue(); - } else { - value = null; - } + public int size() { + return supplier.size(); } @Override public float[] getVectorValue() { - VectorEncoderDecoder.decodeDenseVector(value, vector); + VectorEncoderDecoder.decodeDenseVector(bdvSupplier.getInternal(), vector); return vector; } @Override public float getMagnitude() { - return VectorEncoderDecoder.getMagnitude(indexVersion, value); - } - - @Override - public int size() { - if (value == null) { - return 0; - } else { - return 1; - } + return VectorEncoderDecoder.getMagnitude(indexVersion, bdvSupplier.getInternal()); } @Override public double dotProduct(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double dotProduct = 0; @@ -71,6 +95,7 @@ public double dotProduct(float[] queryVector) { @Override public double l1Norm(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double l1norm = 0; @@ -82,6 +107,7 @@ public double l1Norm(float[] queryVector) { @Override public double l2Norm(float[] queryVector) { + BytesRef value = bdvSupplier.getInternal(); ByteBuffer byteBuffer = ByteBuffer.wrap(value.bytes, value.offset, value.length); double l2norm = 0; for (float queryValue : queryVector) { diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java 
b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java index 6ebce8541d308..650ebca1d5ee5 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/DenseVectorScriptDocValues.java @@ -11,11 +11,23 @@ import org.elasticsearch.index.fielddata.ScriptDocValues; public abstract class DenseVectorScriptDocValues extends ScriptDocValues { + + public interface DenseVectorSupplier extends Supplier { + + @Override + default BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + T getInternal(); + } + public static final String MISSING_VECTOR_FIELD_MESSAGE = "A document doesn't have a value for a vector field!"; private final int dims; - public DenseVectorScriptDocValues(int dims) { + public DenseVectorScriptDocValues(DenseVectorSupplier supplier, int dims) { + super(supplier); this.dims = dims; } @@ -46,8 +58,8 @@ public BytesRef get(int index) { ); } - public static DenseVectorScriptDocValues empty(int dims) { - return new DenseVectorScriptDocValues(dims) { + public static DenseVectorScriptDocValues empty(DenseVectorSupplier supplier, int dims) { + return new DenseVectorScriptDocValues(supplier, dims) { @Override public float[] getVectorValue() { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); @@ -73,14 +85,9 @@ public double l2Norm(float[] queryVector) { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); } - @Override - public void setNextDocId(int docId) { - // do nothing - } - @Override public int size() { - return 0; + return supplier.size(); } }; } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java index 03afcbf0dd685..fc6f1bdb59906 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValues.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.vectors.query; import org.apache.lucene.index.VectorValues; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.VectorUtil; import java.io.IOException; @@ -16,36 +17,64 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS; public class KnnDenseVectorScriptDocValues extends DenseVectorScriptDocValues { - private final VectorValues in; - private float[] vector; - KnnDenseVectorScriptDocValues(VectorValues in, int dims) { - super(dims); - this.in = in; - } + public static class KnnDenseVectorSupplier implements DenseVectorSupplier { - @Override - public void setNextDocId(int docId) throws IOException { - int currentDoc = in.docID(); - if (currentDoc == NO_MORE_DOCS || docId < currentDoc) { - vector = null; - } else if (docId == currentDoc) { - vector = in.vectorValue(); - } else { - currentDoc = in.advance(docId); - if (currentDoc == docId) { + private final VectorValues in; + private float[] vector; + + public KnnDenseVectorSupplier(VectorValues in) { + this.in = in; + } + + @Override + public void setNextDocId(int docId) throws IOException { + int currentDoc = in.docID(); + if (currentDoc == NO_MORE_DOCS || docId < currentDoc) { + vector = null; + } else if (docId == currentDoc) { vector = in.vectorValue(); } else { - 
vector = null; + currentDoc = in.advance(docId); + if (currentDoc == docId) { + vector = in.vectorValue(); + } else { + vector = null; + } + } + } + + @Override + public BytesRef getInternal(int index) { + throw new UnsupportedOperationException(); + } + + public float[] getInternal() { + return vector; + } + + @Override + public int size() { + if (vector == null) { + return 0; + } else { + return 1; } } } + private final KnnDenseVectorSupplier kdvSupplier; + + KnnDenseVectorScriptDocValues(KnnDenseVectorSupplier supplier, int dims) { + super(supplier, dims); + this.kdvSupplier = supplier; + } + private float[] getVectorChecked() { - if (vector == null) { + if (kdvSupplier.getInternal() == null) { throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); } - return vector; + return kdvSupplier.getInternal(); } @Override @@ -88,10 +117,6 @@ public double l2Norm(float[] queryVector) { @Override public int size() { - if (vector == null) { - return 0; - } else { - return 1; - } + return supplier.size(); } } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java index 511985b62a58e..e97daf4c2f397 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/ScoreScriptUtils.java @@ -63,7 +63,7 @@ public DenseVectorFunction(ScoreScript scoreScript, List queryVector, St void setNextVector() { try { - docValues.setNextDocId(scoreScript._getDocId()); + docValues.getSupplier().setNextDocId(scoreScript._getDocId()); } catch (IOException e) { throw ExceptionsHelper.convertToElastic(e); } diff --git a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java index e0184303e65f7..1d8c45e9c60c2 100644 --- a/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java +++ b/x-pack/plugin/vectors/src/main/java/org/elasticsearch/xpack/vectors/query/VectorDVLeafFieldData.java @@ -17,11 +17,16 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.script.field.DelegateDocValuesField; import org.elasticsearch.script.field.DocValuesField; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; +import org.elasticsearch.xpack.vectors.query.DenseVectorScriptDocValues.DenseVectorSupplier; +import org.elasticsearch.xpack.vectors.query.KnnDenseVectorScriptDocValues.KnnDenseVectorSupplier; import java.io.IOException; import java.util.Collection; import java.util.Collections; +import static org.elasticsearch.xpack.vectors.query.DenseVectorScriptDocValues.MISSING_VECTOR_FIELD_MESSAGE; + final class VectorDVLeafFieldData implements LeafFieldData { private final LeafReader reader; @@ -59,12 +64,30 @@ public DocValuesField getScriptField(String name) { if (indexed) { VectorValues values = reader.getVectorValues(field); if (values == null || values == VectorValues.EMPTY) { - return new DelegateDocValuesField(DenseVectorScriptDocValues.empty(dims), name); + return new DelegateDocValuesField(DenseVectorScriptDocValues.empty(new DenseVectorSupplier() { + @Override + public float[] getInternal() { + throw new IllegalArgumentException(MISSING_VECTOR_FIELD_MESSAGE); + } + + 
@Override + public void setNextDocId(int docId) throws IOException { + // do nothing + } + + @Override + public int size() { + return 0; + } + }, dims), name); } - return new DelegateDocValuesField(new KnnDenseVectorScriptDocValues(values, dims), name); + return new DelegateDocValuesField(new KnnDenseVectorScriptDocValues(new KnnDenseVectorSupplier(values), dims), name); } else { BinaryDocValues values = DocValues.getBinary(reader, field); - return new DelegateDocValuesField(new BinaryDenseVectorScriptDocValues(values, indexVersion, dims), name); + return new DelegateDocValuesField( + new BinaryDenseVectorScriptDocValues(new BinaryDenseVectorSupplier(values), indexVersion, dims), + name + ); } } catch (IOException e) { throw new IllegalStateException("Cannot load doc values for vector field!", e); diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java index 6541ccbd01c4e..2761364e51505 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/BinaryDenseVectorScriptDocValuesTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.Version; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.vectors.mapper.VectorEncoderDecoder; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; import java.io.IOException; import java.nio.ByteBuffer; @@ -28,9 +29,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = wrap(vectors, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, indexVersion, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, indexVersion, dims); for (int i = 0; i < vectors.length; i++) { - scriptDocValues.setNextDocId(i); + supplier.setNextDocId(i); assertArrayEquals(vectors[i], scriptDocValues.getVectorValue(), 0.0001f); assertEquals(expectedMagnitudes[i], scriptDocValues.getMagnitude(), 0.0001f); } @@ -41,13 +43,14 @@ public void testMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; BinaryDocValues docValues = wrap(vectors, Version.CURRENT); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(3); - Exception e = expectThrows(IllegalArgumentException.class, () -> scriptDocValues.getVectorValue()); + supplier.setNextDocId(3); + Exception e = expectThrows(IllegalArgumentException.class, scriptDocValues::getVectorValue); assertEquals("A document doesn't have a value for a vector field!", e.getMessage()); - e = expectThrows(IllegalArgumentException.class, () -> scriptDocValues.getMagnitude()); + e = expectThrows(IllegalArgumentException.class, scriptDocValues::getMagnitude); assertEquals("A document doesn't have a 
value for a vector field!", e.getMessage()); } @@ -55,9 +58,10 @@ public void testGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; BinaryDocValues docValues = wrap(vectors, Version.CURRENT); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); assertThat(e.getMessage(), containsString("accessing a vector field's value through 'get' or 'value' is not supported!")); } @@ -69,9 +73,10 @@ public void testSimilarityFunctions() throws IOException { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = wrap(new float[][] { docVector }, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, Version.CURRENT, dims); + BinaryDenseVectorSupplier supplier = new BinaryDenseVectorSupplier(docValues); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(supplier, Version.CURRENT, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); assertEquals( "dotProduct result is not equal to the expected value!", diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java index 1cd89e4993c7e..0ecd26f08c20c 100644 --- a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/DenseVectorFunctionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.Version; import org.elasticsearch.script.ScoreScript; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.vectors.query.BinaryDenseVectorScriptDocValues.BinaryDenseVectorSupplier; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.CosineSimilarity; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.DotProduct; import org.elasticsearch.xpack.vectors.query.ScoreScriptUtils.L1Norm; @@ -36,7 +37,11 @@ public void testVectorFunctions() { for (Version indexVersion : Arrays.asList(Version.V_7_4_0, Version.CURRENT)) { BinaryDocValues docValues = BinaryDenseVectorScriptDocValuesTests.wrap(new float[][] { docVector }, indexVersion); - DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues(docValues, indexVersion, dims); + DenseVectorScriptDocValues scriptDocValues = new BinaryDenseVectorScriptDocValues( + new BinaryDenseVectorSupplier(docValues), + indexVersion, + dims + ); ScoreScript scoreScript = mock(ScoreScript.class); when(scoreScript.getDoc()).thenReturn(Collections.singletonMap(field, scriptDocValues)); diff --git a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java index 319a98a619bf6..7005e4d7bd531 100644 --- 
a/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java +++ b/x-pack/plugin/vectors/src/test/java/org/elasticsearch/xpack/vectors/query/KnnDenseVectorScriptDocValuesTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.index.VectorValues; import org.apache.lucene.util.BytesRef; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.vectors.query.KnnDenseVectorScriptDocValues.KnnDenseVectorSupplier; import java.io.IOException; @@ -22,9 +23,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; float[] expectedMagnitudes = { 1.7320f, 2.4495f, 3.3166f }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); for (int i = 0; i < vectors.length; i++) { - scriptDocValues.setNextDocId(i); + supplier.setNextDocId(i); assertArrayEquals(vectors[i], scriptDocValues.getVectorValue(), 0.0001f); assertEquals(expectedMagnitudes[i], scriptDocValues.getMagnitude(), 0.0001f); } @@ -33,9 +35,10 @@ public void testGetVectorValueAndGetMagnitude() throws IOException { public void testMissingValues() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); - scriptDocValues.setNextDocId(3); + supplier.setNextDocId(3); Exception e = expectThrows(IllegalArgumentException.class, () -> scriptDocValues.getVectorValue()); assertEquals("A document doesn't have a value for a vector field!", e.getMessage()); @@ -46,9 +49,10 @@ public void testMissingValues() throws IOException { public void testGetFunctionIsNotAccessible() throws IOException { int dims = 3; float[][] vectors = { { 1, 1, 1 }, { 1, 1, 2 }, { 1, 1, 3 } }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(vectors), dims); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(vectors)); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); - scriptDocValues.setNextDocId(0); + supplier.setNextDocId(0); Exception e = expectThrows(UnsupportedOperationException.class, () -> scriptDocValues.get(0)); assertThat(e.getMessage(), containsString("accessing a vector field's value through 'get' or 'value' is not supported!")); } @@ -58,8 +62,9 @@ public void testSimilarityFunctions() throws IOException { float[] docVector = new float[] { 230.0f, 300.33f, -34.8988f, 15.555f, -200.0f }; float[] queryVector = new float[] { 0.5f, 111.3f, -13.0f, 14.8f, -156.0f }; - DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(wrap(new float[][] { docVector }), dims); - scriptDocValues.setNextDocId(0); + KnnDenseVectorSupplier supplier = new KnnDenseVectorSupplier(wrap(new float[][] { docVector })); + DenseVectorScriptDocValues scriptDocValues = new KnnDenseVectorScriptDocValues(supplier, dims); + supplier.setNextDocId(0); assertEquals("dotProduct result is not equal to the expected value!", 65425.624, scriptDocValues.dotProduct(queryVector), 0.001); assertEquals("l1norm 
result is not equal to the expected value!", 485.184, scriptDocValues.l1Norm(queryVector), 0.001); From 43e6cacdb0225e7d92e33fdccab233c6a9c27c21 Mon Sep 17 00:00:00 2001 From: William Brafford Date: Mon, 29 Nov 2021 13:29:03 -0500 Subject: [PATCH 77/88] Strip blocks from settings for reindex targets (#80887) When migrating system features, we copy settings from old indices into the new indices we create before reindexing. However, if we happen to copy a write block, this causes the reindexing to fail. Here, we strip the index block settings before applying settings to new indices. Fixes #80654 --- .../migration/FeatureMigrationIT.java | 24 +++++++++++++++++++ .../upgrades/SystemIndexMigrator.java | 9 ++++++- 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index fc7a70e3498ec..daa88c8284620 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -242,6 +242,30 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { ); } + public void testMigrateIndexWithWriteBlock() throws Exception { + createSystemIndexForDescriptor(INTERNAL_UNMANAGED); + + String indexName = Optional.ofNullable(INTERNAL_UNMANAGED.getPrimaryIndex()) + .orElse(INTERNAL_UNMANAGED.getIndexPattern().replace("*", "old")); + client().admin().indices().prepareUpdateSettings(indexName).setSettings(Settings.builder().put("index.blocks.write", true)).get(); + + TestPlugin.preMigrationHook.set((state) -> Collections.emptyMap()); + TestPlugin.postMigrationHook.set((state, metadata) -> {}); + + ensureGreen(); + + client().execute(PostFeatureUpgradeAction.INSTANCE, new PostFeatureUpgradeRequest()).get(); + + assertBusy(() -> { + GetFeatureUpgradeStatusResponse statusResp = client().execute( + GetFeatureUpgradeStatusAction.INSTANCE, + new GetFeatureUpgradeStatusRequest() + ).get(); + logger.info(Strings.toString(statusResp)); + assertThat(statusResp.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); + }); + } + public void assertIndexHasCorrectProperties( Metadata metadata, String indexName, diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index 6e70c387dd5b1..190b6e9e2148f 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -459,9 +459,16 @@ private void createIndex(SystemIndexMigrationInfo migrationInfo, ActionListener< migrationInfo.getNextIndexName() ); + Settings.Builder settingsBuilder = Settings.builder(); + if (Objects.nonNull(migrationInfo.getSettings())) { + settingsBuilder.put(migrationInfo.getSettings()); + settingsBuilder.remove("index.blocks.write"); + settingsBuilder.remove("index.blocks.read"); + settingsBuilder.remove("index.blocks.metadata"); + } createRequest.waitForActiveShards(ActiveShardCount.ALL) .mappings(migrationInfo.getMappings()) - .settings(Objects.requireNonNullElse(migrationInfo.getSettings(), Settings.EMPTY)); + .settings(Objects.requireNonNullElse(settingsBuilder.build(), Settings.EMPTY)); metadataCreateIndexService.createIndex(createRequest, listener); } 
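The fix above strips the three block settings one by one. For illustration, the same step can be written generically over every `index.blocks.*` key; this is a minimal sketch that assumes nothing beyond the `Settings` and `Settings.Builder` methods already used in the patch (`put`, `remove`, `keySet`), and the helper class and method names are hypothetical:

import org.elasticsearch.common.settings.Settings;

final class IndexBlockSettings {
    // Sketch: copy the source index settings but drop every index block
    // flag so the reindex target is created writable.
    static Settings withoutIndexBlocks(Settings source) {
        Settings.Builder builder = Settings.builder().put(source);
        for (String key : source.keySet()) {
            if (key.startsWith("index.blocks.")) {
                builder.remove(key);
            }
        }
        return builder.build();
    }
}

Note that a prefix match like this would also drop `index.blocks.read_only` and `index.blocks.read_only_allow_delete`, which the patch leaves in place; enumerating the known-problematic flags is the more conservative choice.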
From ca65718923bc886ab0aa5934ecc1cd3b2e3090ba Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 29 Nov 2021 18:47:01 +0000
Subject: [PATCH 78/88] Clarify `unassigned.reason` docs (#81017)

Today we say that the `unassigned.reason` field in various APIs indicates the
reason why a shard is unassigned. This isn't really true: it tells you some
information about the event that caused the shard to _become_ unassigned (or
which most recently changed its routing table entry while remaining
unassigned) but tells you almost nothing about why the shard _is now_
unassigned and how to fix it. That's what the allocation explain API is for.
This commit clarifies this point in the docs.

Closes #80892

Co-authored-by: James Rodewig

---
 docs/reference/cat/shards.asciidoc            | 14 ++++++++++++--
 .../how-to/fix-common-cluster-issues.asciidoc |  9 ++++-----
 .../rest/action/cat/RestShardsAction.java     |  2 +-
 3 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/docs/reference/cat/shards.asciidoc b/docs/reference/cat/shards.asciidoc
index 3ff2839c8501d..ed9c915e7b7d7 100644
--- a/docs/reference/cat/shards.asciidoc
+++ b/docs/reference/cat/shards.asciidoc
@@ -249,7 +249,9 @@ Time at which the shard became unassigned in
 Time (UTC)].

 `unassigned.details`, `ud`::
-Details about why the shard became unassigned.
+Details about why the shard became unassigned. This does not explain why the
+shard is currently unassigned. To understand why a shard is not assigned, use
+the <> API.

 `unassigned.for`, `uf`::
 Time at which the shard was requested to be unassigned in
@@ -258,16 +260,24 @@ Time (UTC)].

 [[reason-unassigned]]
 `unassigned.reason`, `ur`::
-Reason the shard is unassigned. Returned values are:
+Indicates the reason for the last change to the state of this unassigned shard.
+This does not explain why the shard is currently unassigned. To understand why
+a shard is not assigned, use the <> API. Returned
+values include:
+
 * `ALLOCATION_FAILED`: Unassigned as a result of a failed allocation of the shard.
 * `CLUSTER_RECOVERED`: Unassigned as a result of a full cluster recovery.
 * `DANGLING_INDEX_IMPORTED`: Unassigned as a result of importing a dangling index.
 * `EXISTING_INDEX_RESTORED`: Unassigned as a result of restoring into a closed index.
+* `FORCED_EMPTY_PRIMARY`: The shard's allocation was last modified by forcing an empty primary using the <> API.
+* `INDEX_CLOSED`: Unassigned because the index was closed.
 * `INDEX_CREATED`: Unassigned as a result of an API creation of an index.
 * `INDEX_REOPENED`: Unassigned as a result of opening a closed index.
+* `MANUAL_ALLOCATION`: The shard's allocation was last modified by the <> API.
 * `NEW_INDEX_RESTORED`: Unassigned as a result of restoring into a new index.
 * `NODE_LEFT`: Unassigned as a result of the node hosting it leaving the cluster.
+* `NODE_RESTARTING`: Similar to `NODE_LEFT`, except that the node was registered as restarting using the <>.
+* `PRIMARY_FAILED`: The shard was initializing as a replica, but the primary shard failed before the initialization completed.
 * `REALLOCATED_REPLICA`: A better replica location is identified and causes the existing replica allocation to be cancelled.
 * `REINITIALIZED`: When a shard moves from started back to initializing.
 * `REPLICA_ADDED`: Unassigned as a result of explicit addition of a replica.
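Since the documentation above repeatedly points readers at the allocation explain API, here is a minimal sketch of calling it from the Java low-level REST client (the host, index name, and shard number are placeholders, not part of this commit):

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class AllocationExplainExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Explain one concrete shard; with an empty body the API instead
            // explains the first unassigned shard it finds.
            Request request = new Request("GET", "/_cluster/allocation/explain");
            request.setJsonEntity("{\"index\": \"my-index\", \"shard\": 0, \"primary\": true}");
            Response response = client.performRequest(request);
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}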
diff --git a/docs/reference/how-to/fix-common-cluster-issues.asciidoc b/docs/reference/how-to/fix-common-cluster-issues.asciidoc
index 9750e37d3be8a..cc778ea0d79e4 100644
--- a/docs/reference/how-to/fix-common-cluster-issues.asciidoc
+++ b/docs/reference/how-to/fix-common-cluster-issues.asciidoc
@@ -419,12 +419,11 @@ GET _cat/shards?v=true&h=index,shard,prirep,state,node,unassigned.reason&s=state
 ----

 Unassigned shards have a `state` of `UNASSIGNED`. The `prirep` value is `p` for
-primary shards and `r` for replicas. The `unassigned.reason` describes why the
-shard remains unassigned.
+primary shards and `r` for replicas.

-To get a more in-depth explanation of an unassigned shard's allocation status,
-use the <>. You
-can often use details in the response to resolve the issue.
+To understand why an unassigned shard is not being assigned and what action
+you must take to allow {es} to assign it, use the
+<>.

 [source,console]
 ----
diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
index 29aba3d25d869..efac1431236cd 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java
@@ -110,7 +110,7 @@ protected Table getTableWithHeader(final RestRequest request) {
         table.addCell("sync_id", "alias:sync_id;default:false;desc:sync id");
-        table.addCell("unassigned.reason", "alias:ur;default:false;desc:reason shard is unassigned");
+        table.addCell("unassigned.reason", "alias:ur;default:false;desc:reason shard became unassigned");
         table.addCell("unassigned.at", "alias:ua;default:false;desc:time shard became unassigned (UTC)");
         table.addCell("unassigned.for", "alias:uf;default:false;text-align:right;desc:time has been unassigned");
         table.addCell("unassigned.details", "alias:ud;default:false;desc:additional details as to why the shard became unassigned");

From 256521eafe1af7c363f5cfbbd64a22adbd3760bb Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 29 Nov 2021 19:49:00 +0100
Subject: [PATCH 79/88] Delegate Ref Counting to ByteBuf in Netty Transport
 (#81096)

Tracking down recent memory leaks was made unnecessarily hard by wrapping the
`ByteBuf` ref counting with our own counter. With that wrapper in place, the
increments and decrements were not recorded on the Netty leak tracker, making
it useless for identifying the concrete source of a request: the logged leak
only contained touch points up to our inbound handler code.
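As background for the motivation above, a small standalone Netty sketch (illustrative only, not part of this commit) of the retain/release touch points that the leak tracker can record only when they happen on the `ByteBuf` itself:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.util.ResourceLeakDetector;

public class LeakTouchPointsDemo {
    public static void main(String[] args) {
        // PARANOID tracks every allocated buffer and records its accesses,
        // retains and releases as touch points in any eventual leak report.
        ResourceLeakDetector.setLevel(ResourceLeakDetector.Level.PARANOID);

        ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(16);
        buf.retain();  // recorded by Netty's leak tracker
        buf.release(); // recorded by Netty's leak tracker
        // A reference counter wrapped around the buffer swallows such events,
        // so a later leak report stops at the code that did the wrapping.
        buf.release(); // final release: refCnt hits 0, the buffer is reclaimed
    }
}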
--- .../main/java/org/elasticsearch/nio/Page.java | 4 +- .../netty4/Netty4MessageChannelHandler.java | 42 ++++++++++++++++++- .../bytes/ReleasableBytesReference.java | 42 +++++++++---------- .../transport/InboundAggregatorTests.java | 18 ++++---- .../transport/InboundDecoderTests.java | 28 ++++++++----- .../transport/InboundPipelineTests.java | 2 +- .../GetCcrRestoreFileChunkAction.java | 3 +- 7 files changed, 90 insertions(+), 49 deletions(-) diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java index bc85e7dfb27f2..388ac35ea4ad3 100644 --- a/libs/nio/src/main/java/org/elasticsearch/nio/Page.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/Page.java @@ -28,7 +28,7 @@ public Page(ByteBuffer byteBuffer, Releasable closeable) { } private Page(ByteBuffer byteBuffer, RefCountedCloseable refCountedCloseable) { - assert refCountedCloseable.refCount() > 0; + assert refCountedCloseable.hasReferences(); this.byteBuffer = byteBuffer; this.refCountedCloseable = refCountedCloseable; } @@ -51,7 +51,7 @@ public Page duplicate() { * @return the byte buffer */ public ByteBuffer byteBuffer() { - assert refCountedCloseable.refCount() > 0; + assert refCountedCloseable.hasReferences(); return byteBuffer; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java index 63ccf33561b88..516889b29f8ce 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4MessageChannelHandler.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasables; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.InboundPipeline; @@ -68,7 +69,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception final ByteBuf buffer = (ByteBuf) msg; Netty4TcpChannel channel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get(); final BytesReference wrapped = Netty4Utils.toBytesReference(buffer); - try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, buffer::release)) { + try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, new ByteBufRefCounted(buffer))) { pipeline.handleBytes(channel, reference); } } @@ -211,4 +212,43 @@ void failAsClosedChannel() { buf.release(); } } + + private static final class ByteBufRefCounted implements RefCounted { + + private final ByteBuf buffer; + + ByteBufRefCounted(ByteBuf buffer) { + this.buffer = buffer; + } + + @Override + public void incRef() { + buffer.retain(); + } + + @Override + public boolean tryIncRef() { + if (hasReferences() == false) { + return false; + } + try { + buffer.retain(); + } catch (RuntimeException e) { + assert hasReferences() == false; + return false; + } + return true; + } + + @Override + public boolean decRef() { + return buffer.release(); + } + + @Override + public boolean hasReferences() { + return buffer.refCnt() > 0; + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java 
b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java index c1dce4db7cf5c..07723ef5bcffe 100644 --- a/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java +++ b/server/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java @@ -31,7 +31,7 @@ public final class ReleasableBytesReference implements RefCounted, Releasable, B private static final ReleasableBytesReference EMPTY = new ReleasableBytesReference(BytesArray.EMPTY, NO_OP); private final BytesReference delegate; - private final AbstractRefCounted refCounted; + private final RefCounted refCounted; public static ReleasableBytesReference empty() { EMPTY.incRef(); @@ -42,10 +42,10 @@ public ReleasableBytesReference(BytesReference delegate, Releasable releasable) this(delegate, new RefCountedReleasable(releasable)); } - public ReleasableBytesReference(BytesReference delegate, AbstractRefCounted refCounted) { + public ReleasableBytesReference(BytesReference delegate, RefCounted refCounted) { this.delegate = delegate; this.refCounted = refCounted; - assert refCounted.refCount() > 0; + assert refCounted.hasReferences(); } public static ReleasableBytesReference wrap(BytesReference reference) { @@ -53,10 +53,6 @@ public static ReleasableBytesReference wrap(BytesReference reference) { return reference.length() == 0 ? empty() : new ReleasableBytesReference(reference, NO_OP); } - public int refCount() { - return refCounted.refCount(); - } - @Override public void incRef() { refCounted.incRef(); @@ -98,19 +94,19 @@ public void close() { @Override public byte get(int index) { - assert refCount() > 0; + assert hasReferences(); return delegate.get(index); } @Override public int getInt(int index) { - assert refCount() > 0; + assert hasReferences(); return delegate.getInt(index); } @Override public int indexOf(byte marker, int from) { - assert refCount() > 0; + assert hasReferences(); return delegate.indexOf(marker, from); } @@ -121,7 +117,7 @@ public int length() { @Override public BytesReference slice(int from, int length) { - assert refCount() > 0; + assert hasReferences(); return delegate.slice(from, length); } @@ -132,7 +128,7 @@ public long ramBytesUsed() { @Override public StreamInput streamInput() throws IOException { - assert refCount() > 0; + assert hasReferences(); return new BytesReferenceStreamInput(this) { @Override public ReleasableBytesReference readReleasableBytesReference() throws IOException { @@ -148,37 +144,37 @@ public ReleasableBytesReference readReleasableBytesReference() throws IOExceptio @Override public void writeTo(OutputStream os) throws IOException { - assert refCount() > 0; + assert hasReferences(); delegate.writeTo(os); } @Override public String utf8ToString() { - assert refCount() > 0; + assert hasReferences(); return delegate.utf8ToString(); } @Override public BytesRef toBytesRef() { - assert refCount() > 0; + assert hasReferences(); return delegate.toBytesRef(); } @Override public BytesRefIterator iterator() { - assert refCount() > 0; + assert hasReferences(); return delegate.iterator(); } @Override public int compareTo(BytesReference o) { - assert refCount() > 0; + assert hasReferences(); return delegate.compareTo(o); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - assert refCount() > 0; + assert hasReferences(); return delegate.toXContent(builder, params); } @@ -189,31 +185,31 @@ public boolean isFragment() { @Override public boolean equals(Object obj) { - assert refCount() > 0; + 
assert hasReferences(); return delegate.equals(obj); } @Override public int hashCode() { - assert refCount() > 0; + assert hasReferences(); return delegate.hashCode(); } @Override public boolean hasArray() { - assert refCount() > 0; + assert hasReferences(); return delegate.hasArray(); } @Override public byte[] array() { - assert refCount() > 0; + assert hasReferences(); return delegate.array(); } @Override public int arrayOffset() { - assert refCount() > 0; + assert hasReferences(); return delegate.arrayOffset(); } diff --git a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java index ff5a7938a3ead..97e7dddc720ca 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundAggregatorTests.java @@ -91,11 +91,11 @@ public void testInboundAggregation() throws IOException { assertThat(aggregated.getHeader().getRequestId(), equalTo(requestId)); assertThat(aggregated.getHeader().getVersion(), equalTo(Version.CURRENT)); for (ReleasableBytesReference reference : references) { - assertEquals(1, reference.refCount()); + assertTrue(reference.hasReferences()); } aggregated.close(); for (ReleasableBytesReference reference : references) { - assertEquals(0, reference.refCount()); + assertFalse(reference.hasReferences()); } } @@ -111,7 +111,7 @@ public void testInboundUnknownAction() throws IOException { final ReleasableBytesReference content = ReleasableBytesReference.wrap(bytes); aggregator.aggregate(content); content.close(); - assertEquals(0, content.refCount()); + assertFalse(content.hasReferences()); // Signal EOS InboundMessage aggregated = aggregator.finishAggregation(); @@ -139,7 +139,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated1 = aggregator.finishAggregation(); - assertEquals(0, content1.refCount()); + assertFalse(content1.hasReferences()); assertThat(aggregated1, notNullValue()); assertTrue(aggregated1.isShortCircuit()); assertThat(aggregated1.getException(), instanceOf(CircuitBreakingException.class)); @@ -158,7 +158,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated2 = aggregator.finishAggregation(); - assertEquals(1, content2.refCount()); + assertTrue(content2.hasReferences()); assertThat(aggregated2, notNullValue()); assertFalse(aggregated2.isShortCircuit()); @@ -177,7 +177,7 @@ public void testCircuitBreak() throws IOException { // Signal EOS InboundMessage aggregated3 = aggregator.finishAggregation(); - assertEquals(1, content3.refCount()); + assertTrue(content3.hasReferences()); assertThat(aggregated3, notNullValue()); assertFalse(aggregated3.isShortCircuit()); } @@ -211,7 +211,7 @@ public void testCloseWillCloseContent() { aggregator.close(); for (ReleasableBytesReference reference : references) { - assertEquals(0, reference.refCount()); + assertFalse(reference.hasReferences()); } } @@ -244,10 +244,10 @@ public void testFinishAggregationWillFinishHeader() throws IOException { assertFalse(header.needsToReadVariableHeader()); assertEquals(actionName, header.getActionName()); if (unknownAction) { - assertEquals(0, content.refCount()); + assertFalse(content.hasReferences()); assertTrue(aggregated.isShortCircuit()); } else { - assertEquals(1, content.refCount()); + assertTrue(content.hasReferences()); assertFalse(aggregated.isShortCircuit()); } } diff --git 
a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index ed828607732ef..65e3cb1ad4325 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -80,7 +80,7 @@ public void testDecode() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -108,7 +108,10 @@ public void testDecode() throws IOException { assertEquals(messageBytes, content); // Ref count is incremented since the bytes are forwarded as a fragment - assertEquals(2, releasable2.refCount()); + assertTrue(releasable2.hasReferences()); + releasable2.decRef(); + assertTrue(releasable2.hasReferences()); + assertTrue(releasable2.decRef()); assertEquals(InboundDecoder.END_CONTENT, endMarker); } @@ -141,7 +144,7 @@ public void testDecodePreHeaderSizeVariableInt() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(partialHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -198,7 +201,7 @@ public void testDecodeHandshakeCompatibility() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -247,7 +250,7 @@ public void testCompressedDecode() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(totalBytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -279,7 +282,7 @@ public void testCompressedDecode() throws IOException { assertThat(content, instanceOf(ReleasableBytesReference.class)); ((ReleasableBytesReference) content).close(); // Ref count is not incremented since the bytes are immediately consumed on decompression - assertEquals(1, releasable2.refCount()); + assertTrue(releasable2.hasReferences()); assertEquals(InboundDecoder.END_CONTENT, endMarker); } @@ -311,7 +314,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); int bytesConsumed = decoder.decode(releasable1, fragments::add); assertEquals(totalHeaderSize, bytesConsumed); - assertEquals(1, releasable1.refCount()); + assertTrue(releasable1.hasReferences()); final Header header = (Header) fragments.get(0); assertEquals(requestId, header.getRequestId()); @@ -339,16 +342,19 @@ public void 
testVersionIncompatibilityDecodeException() throws IOException {
             Compression.Scheme.DEFLATE
         );

+        final ReleasableBytesReference releasable1;
         try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) {
             final BytesReference bytes = message.serialize(os);
             InboundDecoder decoder = new InboundDecoder(Version.CURRENT, recycler);
             final ArrayList fragments = new ArrayList<>();
-            final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes);
-            expectThrows(IllegalStateException.class, () -> decoder.decode(releasable1, fragments::add));
-            // No bytes are retained
-            assertEquals(1, releasable1.refCount());
+            try (ReleasableBytesReference r = ReleasableBytesReference.wrap(bytes)) {
+                releasable1 = r;
+                expectThrows(IllegalStateException.class, () -> decoder.decode(releasable1, fragments::add));
+            }
         }
+        // No bytes are retained
+        assertFalse(releasable1.hasReferences());
     }

     public void testEnsureVersionCompatibility() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java
index ad26b60518c3c..2de79ce854187 100644
--- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java
@@ -184,7 +184,7 @@ public void testPipelineHandling() throws IOException {
         }

         for (ReleasableBytesReference released : toRelease) {
-            assertEquals(0, released.refCount());
+            assertFalse(released.hasReferences());
         }
     }

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java
index 271f275c88514..2698a3dfccb6a 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/GetCcrRestoreFileChunkAction.java
@@ -19,7 +19,6 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.ByteArray;
-import org.elasticsearch.core.RefCounted;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportActionProxy;
@@ -80,7 +79,7 @@ protected void doExecute(
         }
     }

-    public static class GetCcrRestoreFileChunkResponse extends ActionResponse implements RefCounted {
+    public static class GetCcrRestoreFileChunkResponse extends ActionResponse {
         private final long offset;
         private final ReleasableBytesReference chunk;

From 76e935efd8328bf4124af77b73663110f4816f93 Mon Sep 17 00:00:00 2001
From: Mark Tozzi
Date: Mon, 29 Nov 2021 14:55:56 -0500
Subject: [PATCH 80/88] Fail shards early when we can detect a type mismatch
 (#79869)

Resolves #72276

Generally speaking, we can't detect field type mismatches between different
shards until reduce time, which then causes us to fail the whole aggregation.
There is an exception, though, when the user has specified a value type. Since
the value type gets pushed out to all the shards, we can detect on the shard
if the field type doesn't match the specified value type, and fail only that
shard, allowing for a partial result on the aggregation.
In the case where the user supplies a script as well, we don't fail the shard, because it's possible the script changes the type (this was a pattern before runtime fields) --- .../test/search.aggregation/20_terms.yml | 140 ++++++++++++ .../aggregations/support/FieldContext.java | 4 + .../support/ValuesSourceConfig.java | 21 +- .../terms/BinaryTermsAggregatorTests.java | 2 +- .../support/ValuesSourceConfigTests.java | 200 +++++++++++++++++- .../index/mapper/MapperServiceTestCase.java | 54 ++++- 6 files changed, 409 insertions(+), 12 deletions(-) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml index 7d4ad735fa96d..eb871da38db0b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.aggregation/20_terms.yml @@ -1368,3 +1368,143 @@ huge size: - match: { aggregations.str_terms.buckets.1.doc_count: 2 } - match: { aggregations.str_terms.buckets.2.key: c } - match: { aggregations.str_terms.buckets.2.doc_count: 3 } + +--- +Value type mismatch fails shard: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_1 + refresh: true + body: | + { "index": {} } + { "ip": "192.168.7.1" } + { "index": {} } + { "ip": "192.168.7.2" } + { "index": {} } + { "ip": "192.168.7.3" } + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + { "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: + size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } + +--- +Value type mismatch fails shard with no docs: + - skip: + version: " - 8.0.99" + reason: "Fixed in 8.1" + + - do: + indices.create: + index: valuetype_test_1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: keyword + - do: + indices.create: + index: valuetype_test_2 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + ip: + type: ip + + - do: + bulk: + index: valuetype_test_2 + refresh: true + body: | + { "index": {} } + { "ip": "127.0.0.1" } + { "index": {} } + { "ip": "192.168.0.1" } + { "index": {} } + { "ip": "192.168.0.2" } + { "index": {} } + { "ip": "192.168.0.3" } + - do: + search: + index: valuetype_test_1,valuetype_test_2 + body: 
+ size: 0 + aggs: + str_terms: + terms: + field: ip + value_type: ip + + - match: { _shards.failed: 1 } + - length: { aggregations.str_terms.buckets: 4 } + - match: { aggregations.str_terms.buckets.0.key: "127.0.0.1" } + - match: { aggregations.str_terms.buckets.0.doc_count: 1 } + - match: { aggregations.str_terms.buckets.1.key: "192.168.0.1" } + - match: { aggregations.str_terms.buckets.1.doc_count: 1 } + - match: { aggregations.str_terms.buckets.2.key: "192.168.0.2" } + - match: { aggregations.str_terms.buckets.2.doc_count: 1 } + - match: { aggregations.str_terms.buckets.3.key: "192.168.0.3" } + - match: { aggregations.str_terms.buckets.3.doc_count: 1 } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java index 87c1f3d645293..101e94b6717c4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/FieldContext.java @@ -47,4 +47,8 @@ public MappedFieldType fieldType() { return fieldType; } + public String getTypeName() { + return fieldType.typeName(); + } + } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 7483419872ef3..12a20da6ae5cc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -148,7 +148,26 @@ private static ValuesSourceConfig internalResolve( if (valuesSourceType == null) { // We have a field, and the user didn't specify a type, so get the type from the field valuesSourceType = fieldResolver.getValuesSourceType(fieldContext, userValueTypeHint, defaultValueSourceType); - } + } else if (valuesSourceType != fieldResolver.getValuesSourceType(fieldContext, userValueTypeHint, defaultValueSourceType) + && script == null) { + /* + * This is the case where the user has specified the type they expect, but we found a field of a different type. + * Usually this happens because of a mapping error, e.g. an older index mapped an IP address as a keyword. If + * the aggregation proceeds, it will usually break during reduction and return no results. So instead, we fail the + * shard with the conflict at this point, allowing the correctly mapped shards to return results with a partial + * failure. + * + * Note that if a script is specified, the assumption is that the script adapts the field into the specified type, + * and we allow the aggregation to continue. 
+ */ + throw new IllegalArgumentException( + "Field type [" + + fieldContext.getTypeName() + + "] is incompatible with specified value_type [" + + userValueTypeHint + + "]" + ); + } } } if (valuesSourceType == null) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java index 80c8909cd2129..cc5abceb5c588 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/BinaryTermsAggregatorTests.java @@ -124,7 +124,7 @@ public void testBadUserValueTypeHint() throws IOException { ValueType.NUMERIC // numeric type hint ) ); - assertThat(e.getMessage(), equalTo("Expected numeric type on field [binary], but got [binary]")); + assertThat(e.getMessage(), equalTo("Field type [binary] is incompatible with specified value_type [numeric]")); } private void testSearchCase( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java index 027c0a9a3cc26..928c8c77e0e42 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfigTests.java @@ -14,11 +14,209 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.script.AggregationScript; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.mockito.Mockito; import java.util.List; -// TODO: This whole set of tests needs to be rethought. public class ValuesSourceConfigTests extends MapperServiceTestCase { + + @Override + @SuppressWarnings("unchecked") + protected T compileScript(Script script, ScriptContext context) { + AggregationScript.Factory mockFactory = Mockito.mock(AggregationScript.Factory.class); + Mockito.when(mockFactory.newFactory(Mockito.any(), Mockito.any())).thenReturn(Mockito.mock(AggregationScript.LeafFactory.class)); + return (T) mockFactory; + } + + /** + * Attempting to resolve a config with neither a field nor a script specified throws an error + */ + public void testNoFieldNoScript() { + expectThrows( + IllegalStateException.class, + () -> ValuesSourceConfig.resolve(null, null, null, null, null, null, null, CoreValuesSourceType.KEYWORD) + ); + } + + /** + * When there's an unmapped field with no script, we should use the user value type hint if available, and fall back to the default + * value source type if it's not available. 
+ */ + public void testUnmappedFieldNoScript() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // No value type hint + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, null, "UnmappedField", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }); + + // With value type hint + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "UnmappedField", + null, + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }); + } + + /** + * When the field is mapped and there's no script and no hint, use the field type + */ + public void testMappedFieldNoScriptNoHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, null, "field", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }); + } + + /** + * When we have a mapped field and a hint, but no script, we should throw if the hint doesn't match the field, + * and use the type of both if they do match. Note that when there is a script, we just use the user value type + * regardless of the field type. This is to allow for scripts that adapt types, even though runtime fields are + * a better solution for that in every way. 
+ */ + public void testMappedFieldNoScriptWithHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // not matching case + expectThrows( + IllegalArgumentException.class, + () -> withAggregationContext( + mapperService, + List.of(source(b -> b.field("field", 42))), + context -> ValuesSourceConfig.resolve(context, ValueType.IP, "field", null, null, null, null, CoreValuesSourceType.KEYWORD) + ) + ); + + // matching case + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve(context, ValueType.NUMBER, "field", null, null, null, null, CoreValuesSourceType.KEYWORD); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }); + } + + /** + * When there's a script and the user tells us what type it produces, always use that type, regardless of if there's also a field + */ + public void testScriptWithHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // With field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "field", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + + // With unmapped field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + "unmappedField", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + + // Without field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + ValueType.IP, + null, + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.IP, config.valueSourceType()); + }, () -> null); + } + + /** + * If there's a script and the user didn't tell us what type it produces, use the field if possible, otherwise the default + */ + public void testScriptNoHint() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "long"))); + // With field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + null, + "field", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.NUMERIC, config.valueSourceType()); + }, () -> null); + + // With unmapped field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig config; + config = ValuesSourceConfig.resolve( + context, + null, + "unmappedField", + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }, () -> null); + + // Without field + withAggregationContext(mapperService, List.of(source(b -> b.field("field", 42))), context -> { + ValuesSourceConfig 
config; + config = ValuesSourceConfig.resolve( + context, + null, + null, + mockScript("mockscript"), + null, + null, + null, + CoreValuesSourceType.KEYWORD + ); + assertEquals(CoreValuesSourceType.KEYWORD, config.valueSourceType()); + }, () -> null); + } + public void testKeyword() throws Exception { MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "keyword"))); withAggregationContext(mapperService, List.of(source(b -> b.field("field", "abc"))), context -> { diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 5db980f7a2cc8..c8e45b619bf92 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -177,10 +177,6 @@ protected final MapperService createMapperService( return mapperService; } - protected T compileScript(Script script, ScriptContext context) { - throw new UnsupportedOperationException("Cannot compile script " + Strings.toString(script)); - } - protected final MapperService createMapperService(Version version, Settings settings, BooleanSupplier idFieldDataEnabled) { IndexSettings indexSettings = createIndexSettings(version, settings); MapperRegistry mapperRegistry = new IndicesModule( @@ -200,6 +196,14 @@ protected final MapperService createMapperService(Version version, Settings sett ); } + /** + * This is the injection point for tests that require mock scripts. Test cases should override this to return the + * mock script factory of their choice. + */ + protected T compileScript(Script script, ScriptContext context) { + throw new UnsupportedOperationException("Cannot compile script " + Strings.toString(script)); + } + protected static IndexSettings createIndexSettings(Version version, Settings settings) { settings = Settings.builder() .put("index.number_of_replicas", 0) @@ -329,7 +333,8 @@ private AggregationContext aggregationContext( ValuesSourceRegistry valuesSourceRegistry, MapperService mapperService, IndexSearcher searcher, - Query query + Query query, + Supplier lookupSupplier ) { return new AggregationContext() { private final CircuitBreaker breaker = mock(CircuitBreaker.class); @@ -383,7 +388,7 @@ public boolean isFieldMapped(String field) { @Override public SearchLookup lookup() { - throw new UnsupportedOperationException(); + return lookupSupplier.get(); } @Override @@ -407,8 +412,9 @@ public Set getMatchingFieldNames(String pattern) { } @Override + @SuppressWarnings("unchecked") public FactoryType compile(Script script, ScriptContext context) { - throw new UnsupportedOperationException(); + return compileScript(script, context); } @Override @@ -518,7 +524,16 @@ protected final void withAggregationContext( List docs, CheckedConsumer test ) throws IOException { - withAggregationContext(null, mapperService, docs, null, test); + withAggregationContext(mapperService, docs, test, () -> { throw new UnsupportedOperationException(); }); + } + + protected final void withAggregationContext( + MapperService mapperService, + List docs, + CheckedConsumer test, + Supplier lookupSupplier + ) throws IOException { + withAggregationContext(null, mapperService, docs, null, test, lookupSupplier); } protected final void withAggregationContext( @@ -527,12 +542,33 @@ protected final void withAggregationContext( List docs, Query query, CheckedConsumer test + ) throws IOException { 
+ withAggregationContext( + valuesSourceRegistry, + mapperService, + docs, + query, + test, + () -> { throw new UnsupportedOperationException(); } + ); + } + + protected final void withAggregationContext( + ValuesSourceRegistry valuesSourceRegistry, + MapperService mapperService, + List docs, + Query query, + CheckedConsumer test, + Supplier lookupSupplier ) throws IOException { withLuceneIndex(mapperService, writer -> { for (SourceToParse doc : docs) { writer.addDocuments(mapperService.documentMapper().parse(doc).docs()); + } - }, reader -> test.accept(aggregationContext(valuesSourceRegistry, mapperService, new IndexSearcher(reader), query))); + }, + reader -> test.accept(aggregationContext(valuesSourceRegistry, mapperService, new IndexSearcher(reader), query, lookupSupplier)) + ); } protected SearchExecutionContext createSearchExecutionContext(MapperService mapperService) { From 85b3435100282508a866ebbcb15b4f99d4df23f8 Mon Sep 17 00:00:00 2001 From: Rory Hunter Date: Mon, 29 Nov 2021 20:10:16 +0000 Subject: [PATCH 81/88] Fix shadowed vars pt7 (#80996) Part of #19752. Fix more instances where local variable names were shadowing field names. Also modify our fork of HiddenFieldCheck to add the ignoreConstructorBody and ignoredMethodNames parameters, so that the check can ignore more matches. --- .../internal/checkstyle/HiddenFieldCheck.java | 62 ++++++++++++++++++- .../src/main/resources/checkstyle.xml | 4 +- .../ccr/action/ShardFollowNodeTaskTests.java | 6 +- .../index/engine/FollowingEngineTests.java | 49 ++++++++------- .../AutoFollowStatsMonitoringDocTests.java | 2 +- .../ccr/FollowStatsMonitoringDocTests.java | 8 +-- .../action/GetRollupIndexCapsAction.java | 2 +- .../xpack/core/scheduler/Cron.java | 2 +- .../search/action/AsyncSearchResponse.java | 8 +-- .../core/security/CommandLineHttpClient.java | 4 +- .../action/InvalidateApiKeyRequest.java | 10 +-- .../security/action/role/PutRoleRequest.java | 12 ++-- .../accesscontrol/IndicesAccessControl.java | 28 +++++---- .../authz/permission/FieldPermissions.java | 4 +- .../authz/permission/IndicesPermission.java | 10 +-- .../authz/permission/ResourcePrivileges.java | 4 +- .../core/slm/SnapshotLifecyclePolicy.java | 10 +-- .../slm/SnapshotLifecyclePolicyMetadata.java | 8 +-- .../core/slm/SnapshotLifecycleStats.java | 6 +- .../core/ssl/SSLConfigurationSettings.java | 4 +- .../xpack/core/ssl/SSLService.java | 16 ++--- .../xpack/core/ssl/SslSettingsLoader.java | 4 +- .../termsenum/action/MultiShardTermsEnum.java | 6 +- .../core/transform/TransformMetadata.java | 4 +- .../transform/action/StopTransformAction.java | 4 +- .../transform/transforms/SettingsConfig.java | 6 +- .../transform/transforms/TransformConfig.java | 4 +- .../transforms/TransformProgress.java | 6 +- .../pivot/DateHistogramGroupSource.java | 6 +- .../pivot/HistogramGroupSource.java | 4 +- .../actions/throttler/PeriodThrottler.java | 16 ++--- .../watcher/client/WatchSourceBuilder.java | 16 +++-- .../core/watcher/common/stats/Counters.java | 4 +- .../execution/WatchExecutionContext.java | 26 ++++---- .../transform/chain/ChainTransform.java | 8 +-- .../actions/execute/ExecuteWatchRequest.java | 3 +- .../xpack/core/watcher/watch/Watch.java | 4 +- 37 files changed, 227 insertions(+), 153 deletions(-) diff --git a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java index 23155cc2971e7..a27558f046698 100644 --- 
a/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java
+++ b/build-conventions/src/main/java/org/elasticsearch/gradle/internal/checkstyle/HiddenFieldCheck.java
@@ -71,9 +71,15 @@ public class HiddenFieldCheck extends AbstractCheck {
     /** Control whether to ignore constructor parameters. */
     private boolean ignoreConstructorParameter;

+    /** Control whether to ignore variables in constructor bodies. */
+    private boolean ignoreConstructorBody;
+
     /** Control whether to ignore parameters of abstract methods. */
     private boolean ignoreAbstractMethods;

+    /** If set, specifies a regex of method names that should be ignored. */
+    private String ignoredMethodNames;
+
     @Override
     public int[] getDefaultTokens() {
         return getAcceptableTokens();
@@ -224,7 +230,8 @@ private void processVariable(DetailAST ast) {
             if ((frame.containsStaticField(name) || isInstanceField(ast, name))
                 && isMatchingRegexp(name) == false
-                && isIgnoredParam(ast, name) == false) {
+                && isIgnoredParam(ast, name) == false
+                && isIgnoredVariable(ast, name) == false) {
                 log(nameAST, MSG_KEY, name);
             }
         }
@@ -238,7 +245,14 @@ && isIgnoredParam(ast, name) == false) {
      * @return true if parameter is ignored.
      */
     private boolean isIgnoredParam(DetailAST ast, String name) {
-        return isIgnoredSetterParam(ast, name) || isIgnoredConstructorParam(ast) || isIgnoredParamOfAbstractMethod(ast);
+        return isVariableInIgnoredMethod(ast, name)
+            || isIgnoredSetterParam(ast, name)
+            || isIgnoredConstructorParam(ast)
+            || isIgnoredParamOfAbstractMethod(ast);
+    }
+
+    private boolean isIgnoredVariable(DetailAST ast, String name) {
+        return isIgnoredVariableInConstructorBody(ast, name);
     }

     /**
@@ -410,6 +424,42 @@ private boolean isIgnoredParamOfAbstractMethod(DetailAST ast) {
         return result;
     }

+    /**
+     * Decides whether to ignore an AST node that is the parameter of a method whose
+     * name matches the {@link #ignoredMethodNames} regex, if set.
+     * @param ast the AST to check
+     * @return true if the ast should be ignored because the parameter belongs to a
+     * method whose name matches the regex.
+     */
+    private boolean isVariableInIgnoredMethod(DetailAST ast, String name) {
+        boolean result = false;
+        if (ignoredMethodNames != null && (ast.getType() == TokenTypes.PARAMETER_DEF || ast.getType() == TokenTypes.VARIABLE_DEF)) {
+            DetailAST method = ast.getParent();
+            while (method != null && method.getType() != TokenTypes.METHOD_DEF) {
+                method = method.getParent();
+            }
+            if (method != null && method.getType() == TokenTypes.METHOD_DEF) {
+                final String methodName = method.findFirstToken(TokenTypes.IDENT).getText();
+                result = methodName.matches(ignoredMethodNames);
+            }
+        }
+        return result;
+    }
+
+    private boolean isIgnoredVariableInConstructorBody(DetailAST ast, String name) {
+        boolean result = false;
+
+        if (ignoreConstructorBody && ast.getType() == TokenTypes.VARIABLE_DEF) {
+            DetailAST method = ast.getParent();
+            while (method != null && method.getType() != TokenTypes.CTOR_DEF) {
+                method = method.getParent();
+            }
+            result = method != null && method.getType() == TokenTypes.CTOR_DEF;
+        }
+
+        return result;
+    }
+
     /**
      * Setter to define the RegExp for names of variables and parameters to ignore.
* @@ -463,6 +513,14 @@ public void setIgnoreAbstractMethods(boolean ignoreAbstractMethods) { this.ignoreAbstractMethods = ignoreAbstractMethods; } + public void setIgnoredMethodNames(String ignoredMethodNames) { + this.ignoredMethodNames = ignoredMethodNames; + } + + public void setIgnoreConstructorBody(boolean ignoreConstructorBody) { + this.ignoreConstructorBody = ignoreConstructorBody; + } + /** * Holds the names of static and instance fields of a type. */ diff --git a/build-tools-internal/src/main/resources/checkstyle.xml b/build-tools-internal/src/main/resources/checkstyle.xml index abaab3a1a8ae0..c9ce78ef06b6b 100644 --- a/build-tools-internal/src/main/resources/checkstyle.xml +++ b/build-tools-internal/src/main/resources/checkstyle.xml @@ -111,10 +111,12 @@ diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 47e40b126d044..5ea943a1828ab 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -1421,8 +1421,8 @@ protected void innerSendShardChangesRequest( @Override protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final LongSupplier followerGlobalCheckpoint) { if (scheduleRetentionLeaseRenewal.get()) { - final ScheduledThreadPoolExecutor scheduler = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler"); - final ScheduledFuture future = scheduler.scheduleWithFixedDelay( + final ScheduledThreadPoolExecutor testScheduler = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler"); + final ScheduledFuture future = testScheduler.scheduleWithFixedDelay( () -> retentionLeaseRenewal.accept(followerGlobalCheckpoint.getAsLong()), 0, TimeValue.timeValueMillis(200).millis(), @@ -1433,7 +1433,7 @@ protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final Lo @Override public boolean cancel() { final boolean cancel = future.cancel(true); - scheduler.shutdown(); + testScheduler.shutdown(); return cancel; } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java index 970be2675f9a2..a4c44a1a749b7 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -109,8 +109,8 @@ public void testFollowingEngineRejectsNonFollowingIndex() throws IOException { public void testIndexSeqNoIsMaintained() throws IOException { final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); - runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, index) -> { - final Engine.IndexResult result = followingEngine.index(index); + runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, indexToTest) -> { + final Engine.IndexResult result = followingEngine.index(indexToTest); assertThat(result.getSeqNo(), equalTo(seqNo)); }); } @@ -156,8 +156,8 @@ public void runIndexTest( try (Store store = createStore(shardId, indexSettings, newDirectory())) { final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store); try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { - final 
Engine.Index index = indexForFollowing("id", seqNo, origin); - consumer.accept(followingEngine, index); + final Engine.Index indexToTest = indexForFollowing("id", seqNo, origin); + consumer.accept(followingEngine, indexToTest); } } } @@ -226,16 +226,21 @@ public void testDoNotFillSeqNoGaps() throws Exception { } private EngineConfig engineConfig( - final ShardId shardId, + final ShardId shardIdValue, final IndexSettings indexSettings, final ThreadPool threadPool, final Store store ) { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final Path translogPath = createTempDir("translog"); - final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + final TranslogConfig translogConfig = new TranslogConfig( + shardIdValue, + translogPath, + indexSettings, + BigArrays.NON_RECYCLING_INSTANCE + ); return new EngineConfig( - shardId, + shardIdValue, threadPool, indexSettings, null, @@ -331,26 +336,26 @@ private Engine.Delete deleteForPrimary(String id) { return new Engine.Delete(parsedDoc.id(), EngineTestCase.newUid(parsedDoc), primaryTerm.get()); } - private Engine.Result applyOperation(Engine engine, Engine.Operation op, long primaryTerm, Engine.Operation.Origin origin) + private Engine.Result applyOperation(Engine engine, Engine.Operation op, long primaryTermValue, Engine.Operation.Origin origin) throws IOException { final VersionType versionType = origin == Engine.Operation.Origin.PRIMARY ? VersionType.EXTERNAL : null; final Engine.Result result; if (op instanceof Engine.Index) { - Engine.Index index = (Engine.Index) op; + Engine.Index engineIndex = (Engine.Index) op; result = engine.index( new Engine.Index( - index.uid(), - index.parsedDoc(), - index.seqNo(), - primaryTerm, - index.version(), + engineIndex.uid(), + engineIndex.parsedDoc(), + engineIndex.seqNo(), + primaryTermValue, + engineIndex.version(), versionType, origin, - index.startTime(), - index.getAutoGeneratedIdTimestamp(), - index.isRetry(), - index.getIfSeqNo(), - index.getIfPrimaryTerm() + engineIndex.startTime(), + engineIndex.getAutoGeneratedIdTimestamp(), + engineIndex.isRetry(), + engineIndex.getIfSeqNo(), + engineIndex.getIfPrimaryTerm() ) ); } else if (op instanceof Engine.Delete) { @@ -360,7 +365,7 @@ private Engine.Result applyOperation(Engine engine, Engine.Operation op, long pr delete.id(), delete.uid(), delete.seqNo(), - primaryTerm, + primaryTermValue, delete.version(), versionType, origin, @@ -371,7 +376,7 @@ private Engine.Result applyOperation(Engine engine, Engine.Operation op, long pr ); } else { Engine.NoOp noOp = (Engine.NoOp) op; - result = engine.noOp(new Engine.NoOp(noOp.seqNo(), primaryTerm, origin, noOp.startTime(), noOp.reason())); + result = engine.noOp(new Engine.NoOp(noOp.seqNo(), primaryTermValue, origin, noOp.startTime(), noOp.reason())); } return result; } @@ -828,7 +833,7 @@ public void testProcessOnceOnPrimary() throws Exception { */ public void testVerifyShardBeforeIndexClosingIsNoOp() throws IOException { final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); - runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, index) -> { + runIndexTest(seqNo, Engine.Operation.Origin.PRIMARY, (followingEngine, indexToTest) -> { globalCheckpoint.set(randomNonNegativeLong()); try { followingEngine.verifyEngineBeforeIndexClosing(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java index 5a86f44bb5b90..c5e10371381c1 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/AutoFollowStatsMonitoringDocTests.java @@ -88,7 +88,7 @@ public void testToXContent() throws IOException { final NavigableMap trackingClusters = new TreeMap<>( Collections.singletonMap(randomAlphaOfLength(4), new AutoFollowedCluster(1L, 1L)) ); - final AutoFollowStats autoFollowStats = new AutoFollowStats( + autoFollowStats = new AutoFollowStats( randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java index a561e4d79e0e4..72502c044bf59 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/FollowStatsMonitoringDocTests.java @@ -117,7 +117,7 @@ public void testToXContent() throws IOException { ) ); final long timeSinceLastReadMillis = randomNonNegativeLong(); - final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + final ShardFollowNodeTaskStatus taskStatus = new ShardFollowNodeTaskStatus( "leader_cluster", "leader_index", "follower_index", @@ -148,7 +148,7 @@ public void testToXContent() throws IOException { timeSinceLastReadMillis, new ElasticsearchException("fatal error") ); - final FollowStatsMonitoringDoc document = new FollowStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, status); + final FollowStatsMonitoringDoc document = new FollowStatsMonitoringDoc("_cluster", timestamp, intervalMillis, node, taskStatus); final BytesReference xContent = XContentHelper.toXContent(document, XContentType.JSON, false); assertThat( xContent.utf8ToString(), @@ -273,7 +273,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { final NavigableMap> fetchExceptions = new TreeMap<>( Collections.singletonMap(1L, Tuple.tuple(2, new ElasticsearchException("shard is sad"))) ); - final ShardFollowNodeTaskStatus status = new ShardFollowNodeTaskStatus( + final ShardFollowNodeTaskStatus taskStatus = new ShardFollowNodeTaskStatus( "remote_cluster", "leader_index", "follower_index", @@ -305,7 +305,7 @@ public void testShardFollowNodeTaskStatusFieldsMapped() throws IOException { new ElasticsearchException("fatal error") ); XContentBuilder builder = jsonBuilder(); - builder.value(status); + builder.value(taskStatus); Map serializedStatus = XContentHelper.convertToMap(XContentType.JSON.xContent(), Strings.toString(builder), false); byte[] loadedTemplate = MonitoringTemplateRegistry.getTemplateConfigForMonitoredSystem(MonitoredSystem.ES).loadBytes(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java index 56336ce8d5ab0..54b67535b38a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java @@ -74,7 +74,7 @@ public String[] indices() { } @Override - public IndicesRequest indices(String... indices) { + public IndicesRequest indices(@SuppressWarnings("HiddenField") String... indices) { Objects.requireNonNull(indices, "indices must not be null"); for (String index : indices) { Objects.requireNonNull(index, "index must not be null"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java index 935430656a72d..a9777213faf8f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/scheduler/Cron.java @@ -787,7 +787,7 @@ public static void validate(String expression) throws IllegalArgumentException { // //////////////////////////////////////////////////////////////////////////// - private void buildExpression(String expression) { + private void buildExpression(@SuppressWarnings("HiddenField") String expression) { try { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java index 22d4fe9644ec4..b05d30289b528 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncSearchResponse.java @@ -100,8 +100,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(expirationTimeMillis); } - public AsyncSearchResponse clone(String id) { - return new AsyncSearchResponse(id, searchResponse, error, isPartial, false, startTimeMillis, expirationTimeMillis); + public AsyncSearchResponse clone(String searchId) { + return new AsyncSearchResponse(searchId, searchResponse, error, isPartial, false, startTimeMillis, expirationTimeMillis); } /** @@ -165,8 +165,8 @@ public long getExpirationTime() { } @Override - public AsyncSearchResponse withExpirationTime(long expirationTimeMillis) { - return new AsyncSearchResponse(id, searchResponse, error, isPartial, isRunning, startTimeMillis, expirationTimeMillis); + public AsyncSearchResponse withExpirationTime(long expirationTime) { + return new AsyncSearchResponse(id, searchResponse, error, isPartial, isRunning, startTimeMillis, expirationTime); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java index bb64ff90af63e..a74313121501e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/CommandLineHttpClient.java @@ -343,7 +343,7 @@ public static String apiKeyHeaderValue(SecureString apiKey) { * Returns a TrustManager to be used in a client SSLContext, which trusts all certificates that are signed * by a specific CA certificate ( identified by its SHA256 fingerprint, {@code pinnedCaCertFingerPrint} ) */ - private TrustManager fingerprintTrustingTrustManager(String pinnedCaCertFingerprint) { + private TrustManager fingerprintTrustingTrustManager(String caCertFingerprint) { final TrustManager trustManager = new 
X509TrustManager() { public void checkClientTrusted(X509Certificate[] chain, String authType) throws CertificateException {} @@ -354,7 +354,7 @@ public void checkServerTrusted(X509Certificate[] chain, String authType) throws final Certificate caCertFromChain = chain[1]; MessageDigest sha256 = MessageDigests.sha256(); sha256.update(caCertFromChain.getEncoded()); - if (MessageDigests.toHexString(sha256.digest()).equals(pinnedCaCertFingerprint) == false) { + if (MessageDigests.toHexString(sha256.digest()).equals(caCertFingerprint) == false) { throw new CertificateException(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java index 4bc503246c1c1..2ee259ba52d9f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/InvalidateApiKeyRequest.java @@ -249,14 +249,16 @@ public int hashCode() { return Objects.hash(realmName, userName, ids, name, ownedByAuthenticatedUser); } - private void validateIds(@Nullable String[] ids) { - if (ids != null) { - if (ids.length == 0) { + private void validateIds(@Nullable String[] idsToValidate) { + if (idsToValidate != null) { + if (idsToValidate.length == 0) { final ActionRequestValidationException validationException = new ActionRequestValidationException(); validationException.addValidationError("Field [ids] cannot be an empty array"); throw validationException; } else { - final int[] idxOfBlankIds = IntStream.range(0, ids.length).filter(i -> Strings.hasText(ids[i]) == false).toArray(); + final int[] idxOfBlankIds = IntStream.range(0, idsToValidate.length) + .filter(i -> Strings.hasText(idsToValidate[i]) == false) + .toArray(); if (idxOfBlankIds.length > 0) { final ActionRequestValidationException validationException = new ActionRequestValidationException(); validationException.addValidationError( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java index 26881286c9723..5c5170cd41d6a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java @@ -95,9 +95,9 @@ public ActionRequestValidationException validate() { } catch (IllegalArgumentException e) { validationException = addValidationError(e.getMessage(), validationException); } - for (String name : privilege.getPrivileges()) { + for (String privilegeName : privilege.getPrivileges()) { try { - ApplicationPrivilege.validatePrivilegeOrActionName(name); + ApplicationPrivilege.validatePrivilegeOrActionName(privilegeName); } catch (IllegalArgumentException e) { validationException = addValidationError(e.getMessage(), validationException); } @@ -117,12 +117,12 @@ public void name(String name) { this.name = name; } - public void cluster(String... clusterPrivileges) { - this.clusterPrivileges = clusterPrivileges; + public void cluster(String... clusterPrivilegesArray) { + this.clusterPrivileges = clusterPrivilegesArray; } - public void conditionalCluster(ConfigurableClusterPrivilege... 
configurableClusterPrivileges) { - this.configurableClusterPrivileges = configurableClusterPrivileges; + public void conditionalCluster(ConfigurableClusterPrivilege... configurableClusterPrivilegesArray) { + this.configurableClusterPrivileges = configurableClusterPrivilegesArray; } public void addIndex(RoleDescriptor.IndicesPrivileges... privileges) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java index 15ea0ba298038..4cee1a3006b10 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/IndicesAccessControl.java @@ -182,7 +182,7 @@ public DocumentPermissions getDocumentPermissions() { } /** - * Returns a instance of {@link IndexAccessControl}, where the privileges for {@code this} object are constrained by the privileges + * Returns an instance of {@link IndexAccessControl}, where the privileges for {@code this} object are constrained by the privileges * contained in the provided parameter.
* Allowed fields for this index permission would be an intersection of allowed fields.
* Allowed documents for this index permission would be an intersection of allowed documents.
@@ -193,17 +193,19 @@ public DocumentPermissions getDocumentPermissions() { * @see DocumentPermissions#limitDocumentPermissions(DocumentPermissions) */ public IndexAccessControl limitIndexAccessControl(IndexAccessControl limitedByIndexAccessControl) { - final boolean granted; + final boolean isGranted; if (this.granted == limitedByIndexAccessControl.granted) { - granted = this.granted; + isGranted = this.granted; } else { - granted = false; + isGranted = false; } - FieldPermissions fieldPermissions = getFieldPermissions().limitFieldPermissions(limitedByIndexAccessControl.fieldPermissions); - DocumentPermissions documentPermissions = getDocumentPermissions().limitDocumentPermissions( + FieldPermissions constrainedFieldPermissions = getFieldPermissions().limitFieldPermissions( + limitedByIndexAccessControl.fieldPermissions + ); + DocumentPermissions constrainedDocumentPermissions = getDocumentPermissions().limitDocumentPermissions( limitedByIndexAccessControl.getDocumentPermissions() ); - return new IndexAccessControl(granted, fieldPermissions, documentPermissions); + return new IndexAccessControl(isGranted, constrainedFieldPermissions, constrainedDocumentPermissions); } @Override @@ -264,23 +266,23 @@ public IndicesAccessControl limitIndicesAccessControl(IndicesAccessControl limit return this; } - final boolean granted; + final boolean isGranted; if (this.granted == limitedByIndicesAccessControl.granted) { - granted = this.granted; + isGranted = this.granted; } else { - granted = false; + isGranted = false; } Set indexes = indexPermissions.keySet(); Set otherIndexes = limitedByIndicesAccessControl.indexPermissions.keySet(); Set commonIndexes = Sets.intersection(indexes, otherIndexes); - Map indexPermissions = new HashMap<>(commonIndexes.size()); + Map indexPermissionsMap = new HashMap<>(commonIndexes.size()); for (String index : commonIndexes) { IndexAccessControl indexAccessControl = getIndexPermissions(index); IndexAccessControl limitedByIndexAccessControl = limitedByIndicesAccessControl.getIndexPermissions(index); - indexPermissions.put(index, indexAccessControl.limitIndexAccessControl(limitedByIndexAccessControl)); + indexPermissionsMap.put(index, indexAccessControl.limitIndexAccessControl(limitedByIndexAccessControl)); } - return new IndicesAccessControl(granted, indexPermissions); + return new IndicesAccessControl(isGranted, indexPermissionsMap); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index d673fbb801d6b..bc665e301f095 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -199,8 +199,8 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel */ public FieldPermissions limitFieldPermissions(FieldPermissions limitedBy) { if (hasFieldLevelSecurity() && limitedBy != null && limitedBy.hasFieldLevelSecurity()) { - Automaton permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), limitedBy.getIncludeAutomaton()); - return new FieldPermissions(fieldPermissionsDefinition, limitedBy.fieldPermissionsDefinition, permittedFieldsAutomaton); + Automaton _permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), 
limitedBy.getIncludeAutomaton()); + return new FieldPermissions(fieldPermissionsDefinition, limitedBy.fieldPermissionsDefinition, _permittedFieldsAutomaton); } else if (limitedBy != null && limitedBy.hasFieldLevelSecurity()) { return new FieldPermissions(limitedBy.getFieldPermissionsDefinition(), limitedBy.getIncludeAutomaton()); } else if (hasFieldLevelSecurity()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index efa0b6aff2b22..b666d9f400647 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -109,8 +109,8 @@ private StringMatcher indexMatcher(Collection ordinaryIndices, Collectio } else { matcher = StringMatcher.of(ordinaryIndices); if (restrictedNamesAutomaton != null) { - CharacterRunAutomaton characterRunAutomaton = new CharacterRunAutomaton(restrictedNamesAutomaton); - matcher = matcher.and("", name -> characterRunAutomaton.run(name) == false); + CharacterRunAutomaton automaton = new CharacterRunAutomaton(restrictedNamesAutomaton); + matcher = matcher.and("", name -> automaton.run(name) == false); } if (restrictedIndices.isEmpty() == false) { matcher = StringMatcher.of(restrictedIndices).or(matcher); @@ -331,11 +331,11 @@ public Collection resolveConcreteIndices() { return List.of(indexAbstraction.getName()); } else { final List indices = indexAbstraction.getIndices(); - final List concreteIndices = new ArrayList<>(indices.size()); + final List concreteIndexNames = new ArrayList<>(indices.size()); for (var idx : indices) { - concreteIndices.add(idx.getName()); + concreteIndexNames.add(idx.getName()); } - return concreteIndices; + return concreteIndexNames; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java index 1051f211c2cea..db2e863647f1a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ResourcePrivileges.java @@ -80,8 +80,8 @@ public Builder addPrivilege(String privilege, Boolean allowed) { return this; } - public Builder addPrivileges(Map privileges) { - for (Entry entry : privileges.entrySet()) { + public Builder addPrivileges(Map privilegeMap) { + for (Entry entry : privilegeMap.entrySet()) { addPrivilege(entry.getKey(), entry.getValue()); } return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 5b5b3f8abb9f0..0f9ff0eee78ee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -135,8 +135,8 @@ public SnapshotRetentionConfiguration getRetentionPolicy() { } public long calculateNextExecution() { - final Cron schedule = new Cron(this.schedule); - return schedule.getNextValidTimeAfter(System.currentTimeMillis()); + final Cron 
scheduleEvaluator = new Cron(this.schedule); + return scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); } /** @@ -149,9 +149,9 @@ public long calculateNextExecution() { * if either of the next two times after now is unsupported according to @{@link Cron#getNextValidTimeAfter(long)} */ public TimeValue calculateNextInterval() { - final Cron schedule = new Cron(this.schedule); - long next1 = schedule.getNextValidTimeAfter(System.currentTimeMillis()); - long next2 = schedule.getNextValidTimeAfter(next1); + final Cron scheduleEvaluator = new Cron(this.schedule); + long next1 = scheduleEvaluator.getNextValidTimeAfter(System.currentTimeMillis()); + long next2 = scheduleEvaluator.getNextValidTimeAfter(next1); if (next1 > 0 && next2 > 0) { return TimeValue.timeValueMillis(next2 - next1); } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java index 27a6a30e8d1f6..1a4ee92023f02 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java @@ -247,13 +247,13 @@ public Builder setModifiedDate(long modifiedDate) { return this; } - public Builder setLastSuccess(SnapshotInvocationRecord lastSuccessDate) { - this.lastSuccessDate = lastSuccessDate; + public Builder setLastSuccess(SnapshotInvocationRecord lastSuccess) { + this.lastSuccessDate = lastSuccess; return this; } - public Builder setLastFailure(SnapshotInvocationRecord lastFailureDate) { - this.lastFailureDate = lastFailureDate; + public Builder setLastFailure(SnapshotInvocationRecord lastFailure) { + this.lastFailureDate = lastFailure; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java index f834f25496080..c7fc42b18dbeb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java @@ -133,14 +133,14 @@ public SnapshotLifecycleStats merge(SnapshotLifecycleStats other) { } public SnapshotLifecycleStats removePolicy(String policyId) { - Map policyStats = new HashMap<>(this.policyStats); - policyStats.remove(policyId); + Map policyStatsCopy = new HashMap<>(this.policyStats); + policyStatsCopy.remove(policyId); return new SnapshotLifecycleStats( this.retentionRunCount.count(), this.retentionFailedCount.count(), this.retentionTimedOut.count(), this.retentionTimeMs.count(), - policyStats + policyStatsCopy ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java index d409519313c88..545fc01470473 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationSettings.java @@ -387,8 +387,8 @@ public Setting.AffixSetting affixSetting(String groupPrefix, String keyPrefix return Setting.affixKeySetting(groupPrefix, keyPrefix + name, template); } - public Setting transportProfile(String name) { - return 
transportProfile().getConcreteSetting(name); + public Setting transportProfile(String settingName) { + return transportProfile().getConcreteSetting(settingName); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 9c7d34e03731b..05bdc006921a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -185,7 +185,7 @@ public SSLService createDynamicSSLService() { return new SSLService(env, sslConfigurations, sslContexts) { @Override - Map loadSslConfigurations(Map sslConfigurations) { + Map loadSslConfigurations(Map unused) { // we don't need to load anything... return Collections.emptyMap(); } @@ -214,16 +214,16 @@ public static void registerSettings(List> settingList) { * Create a new {@link SSLIOSessionStrategy} based on the provided settings. The settings are used to identify the SSL configuration * that should be used to create the context. * - * @param settings the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will return - * a context created from the default configuration + * @param settingsToUse the settings used to identify the ssl configuration, typically under a *.ssl. prefix. An empty settings will + * return a context created from the default configuration * @return Never {@code null}. * @deprecated This method will fail if the SSL configuration uses a {@link org.elasticsearch.common.settings.SecureSetting} but the * {@link org.elasticsearch.common.settings.SecureSettings} have been closed. Use {@link #getSSLConfiguration(String)} * and {@link #sslIOSessionStrategy(SslConfiguration)} (Deprecated, but not removed because monitoring uses dynamic SSL settings) */ @Deprecated - public SSLIOSessionStrategy sslIOSessionStrategy(Settings settings) { - SslConfiguration config = sslConfiguration(settings); + public SSLIOSessionStrategy sslIOSessionStrategy(Settings settingsToUse) { + SslConfiguration config = sslConfiguration(settingsToUse); return sslIOSessionStrategy(config); } @@ -395,11 +395,11 @@ SSLContextHolder sslContextHolder(SslConfiguration sslConfiguration) { /** * Returns the existing {@link SslConfiguration} for the given settings * - * @param settings the settings for the ssl configuration + * @param settingsToUse the settings for the ssl configuration * @return the ssl configuration for the provided settings */ - public SslConfiguration sslConfiguration(Settings settings) { - return SslSettingsLoader.load(settings, null, env); + public SslConfiguration sslConfiguration(Settings settingsToUse) { + return SslSettingsLoader.load(settingsToUse, null, env); } public Set getTransportProfileContextNames() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java index e46abca986cf5..216b39c81415f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SslSettingsLoader.java @@ -50,8 +50,8 @@ public SslSettingsLoader(Settings settings, String settingPrefix, boolean accept setDefaultClientAuth(SslClientAuthenticationMode.REQUIRED); } - private Map> mapOf(List> settings) { - return 
settings.stream().collect(Collectors.toMap(s -> s.getKey(), Function.identity())); + private Map> mapOf(List> settingList) { + return settingList.stream().collect(Collectors.toMap(Setting::getKey, Function.identity())); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java index f5b264ebcef57..a240f1fef671d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/MultiShardTermsEnum.java @@ -75,9 +75,9 @@ private void pullTop() { private void pushTop() throws IOException { // call next() on each top, and reorder queue for (int i = 0; i < numTop; i++) { - TermsEnumWithCurrent top = queue.top(); - top.current = top.terms.next(); - if (top.current == null) { + TermsEnumWithCurrent termsEnum = queue.top(); + termsEnum.current = termsEnum.terms.next(); + if (termsEnum.current == null) { queue.pop(); } else { queue.updateTop(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java index 38d71d35f59d0..48fc48ab102b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java @@ -153,8 +153,8 @@ public Builder(@Nullable TransformMetadata previous) { } } - public TransformMetadata.Builder isResetMode(boolean resetMode) { - this.resetMode = resetMode; + public TransformMetadata.Builder isResetMode(boolean isResetMode) { + this.resetMode = isResetMode; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java index 62be52fae0ce8..22eaa840686fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java @@ -175,9 +175,9 @@ public boolean equals(Object obj) { @Override public boolean match(Task task) { if (task.getDescription().startsWith(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX)) { - String id = task.getDescription().substring(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length()); + String taskId = task.getDescription().substring(TransformField.PERSISTENT_TASK_DESCRIPTION_PREFIX.length()); if (expandedIds != null) { - return expandedIds.contains(id); + return expandedIds.contains(taskId); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 27cd1caec192b..6c33776aa2b03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -249,11 +249,11 @@ public Builder setMaxPageSearchSize(Integer maxPageSearchSize) { * This setting throttles transform by issuing queries less often, however processing 
still happens in * batches. A value of 0 disables throttling (default). * - * @param docsPerSecond Integer value + * @param documentsPerSecond Integer value * @return the {@link Builder} with requestsPerSecond set. */ - public Builder setRequestsPerSecond(Float docsPerSecond) { - this.docsPerSecond = docsPerSecond == null ? DEFAULT_DOCS_PER_SECOND : docsPerSecond; + public Builder setRequestsPerSecond(Float documentsPerSecond) { + this.docsPerSecond = documentsPerSecond == null ? DEFAULT_DOCS_PER_SECOND : documentsPerSecond; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index c36b70df6c4b4..90e1e1389b9a8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -308,8 +308,8 @@ public Version getVersion() { return transformVersion; } - public TransformConfig setVersion(Version transformVersion) { - this.transformVersion = transformVersion; + public TransformConfig setVersion(Version version) { + this.transformVersion = version; return this; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java index 6b6788c550558..1b79d474d9080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformProgress.java @@ -114,9 +114,9 @@ public void incrementDocsProcessed(long docsProcessed) { this.documentsProcessed += docsProcessed; } - public void incrementDocsIndexed(long documentsIndexed) { - assert documentsIndexed >= 0; - this.documentsIndexed += documentsIndexed; + public void incrementDocsIndexed(long numDocumentsIndexed) { + assert numDocumentsIndexed >= 0; + this.documentsIndexed += numDocumentsIndexed; } public long getDocumentsProcessed() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index b4876840e2d91..79c7f4438316a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -194,9 +194,9 @@ private Interval readInterval(StreamInput in) throws IOException { } } - private void writeInterval(Interval interval, StreamOutput out) throws IOException { - out.write(interval.getIntervalTypeId()); - interval.writeTo(out); + private void writeInterval(Interval anInterval, StreamOutput out) throws IOException { + out.write(anInterval.getIntervalTypeId()); + anInterval.writeTo(out); } private static final String NAME = "data_frame_date_histogram_group"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java index 
717ba9af098a9..6d24209ba26be 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/HistogramGroupSource.java @@ -44,8 +44,8 @@ private static ConstructingObjectParser createParser String field = (String) args[0]; ScriptConfig scriptConfig = (ScriptConfig) args[1]; boolean missingBucket = args[2] == null ? false : (boolean) args[2]; - double interval = (double) args[3]; - return new HistogramGroupSource(field, scriptConfig, missingBucket, interval); + double intervalValue = (double) args[3]; + return new HistogramGroupSource(field, scriptConfig, missingBucket, intervalValue); }); declareValuesSourceFields(parser, lenient); parser.declareDouble(optionalConstructorArg(), INTERVAL); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java index efc7274b36f41..27647da6d078f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/actions/throttler/PeriodThrottler.java @@ -17,7 +17,7 @@ /** * This throttler throttles the action based on its last successful execution time. If the time passed since - * the last successful execution is lower than the given period, the aciton will be throttled. + * the last successful execution is lower than the given period, the action will be throttled. */ public class PeriodThrottler implements Throttler { @@ -36,14 +36,14 @@ public TimeValue period() { @Override public Result throttle(String actionId, WatchExecutionContext ctx) { - TimeValue period = this.period; - if (period == null) { + TimeValue throttlePeriod = this.period; + if (throttlePeriod == null) { // falling back on the throttle period of the watch - period = ctx.watch().throttlePeriod(); + throttlePeriod = ctx.watch().throttlePeriod(); } - if (period == null) { + if (throttlePeriod == null) { // falling back on the default throttle period of watcher - period = ctx.defaultThrottlePeriod(); + throttlePeriod = ctx.defaultThrottlePeriod(); } ActionStatus status = ctx.watch().status().actionStatus(actionId); if (status.lastSuccessfulExecution() == null) { @@ -52,11 +52,11 @@ public Result throttle(String actionId, WatchExecutionContext ctx) { long now = clock.millis(); long executionTime = status.lastSuccessfulExecution().timestamp().toInstant().toEpochMilli(); TimeValue timeElapsed = TimeValue.timeValueMillis(now - executionTime); - if (timeElapsed.getMillis() <= period.getMillis()) { + if (timeElapsed.getMillis() <= throttlePeriod.getMillis()) { return Result.throttle( PERIOD, "throttling interval is set to [{}] but time elapsed since last execution is [{}]", - period, + throttlePeriod, timeElapsed ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java index e21a490b45f13..7597902de6f37 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/client/WatchSourceBuilder.java @@ -91,12 +91,13 @@ public WatchSourceBuilder 
addAction(String id, TimeValue throttlePeriod, Action. public WatchSourceBuilder addAction( String id, - Transform.Builder transform, + Transform.Builder transformBuilder, Action.Builder action ) { - return addAction(id, null, transform.build(), action.build()); + return addAction(id, null, transformBuilder.build(), action.build()); } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction(String id, Condition condition, Action.Builder action) { return addAction(id, null, condition, null, action.build()); } @@ -104,17 +105,18 @@ public WatchSourceBuilder addAction(String id, Condition condition, Action.Build public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, - Transform.Builder transform, + Transform.Builder transformBuilder, Action.Builder action ) { - return addAction(id, throttlePeriod, transform.build(), action.build()); + return addAction(id, throttlePeriod, transformBuilder.build(), action.build()); } - public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform transform, Action action) { - actions.put(id, new TransformedAction(id, action, throttlePeriod, null, transform, null)); + public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Transform aTransform, Action action) { + actions.put(id, new TransformedAction(id, action, throttlePeriod, null, aTransform, null)); return this; } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, @@ -125,11 +127,13 @@ public WatchSourceBuilder addAction( return addAction(id, throttlePeriod, condition, transform.build(), action.build()); } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction(String id, TimeValue throttlePeriod, Condition condition, Transform transform, Action action) { actions.put(id, new TransformedAction(id, action, throttlePeriod, condition, transform, null)); return this; } + @SuppressWarnings("HiddenField") public WatchSourceBuilder addAction( String id, TimeValue throttlePeriod, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java index ea3481511feb9..7863fa1000999 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/stats/Counters.java @@ -29,8 +29,8 @@ public class Counters implements Writeable { private ObjectLongHashMap counters = new ObjectLongHashMap<>(); public Counters(StreamInput in) throws IOException { - int counters = in.readVInt(); - for (int i = 0; i < counters; i++) { + int numCounters = in.readVInt(); + for (int i = 0; i < numCounters; i++) { inc(in.readString(), in.readVLong()); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java index a6e6b593309b3..fa98a6ffa6e67 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionContext.java @@ -164,11 +164,11 @@ public void beforeInput() { phase = ExecutionPhase.INPUT; } - public void onInputResult(Input.Result inputResult) { + public void onInputResult(Input.Result result) { assert 
phase.sealed() == false; - this.inputResult = inputResult; - if (inputResult.status() == Input.Result.Status.SUCCESS) { - this.payload = inputResult.payload(); + this.inputResult = result; + if (result.status() == Input.Result.Status.SUCCESS) { + this.payload = result.payload(); } } @@ -181,10 +181,10 @@ public void beforeCondition() { phase = ExecutionPhase.CONDITION; } - public void onConditionResult(Condition.Result conditionResult) { + public void onConditionResult(Condition.Result result) { assert phase.sealed() == false; - this.conditionResult = conditionResult; - watch().status().onCheck(conditionResult.met(), executionTime); + this.conditionResult = result; + watch().status().onCheck(result.met(), executionTime); } public Condition.Result conditionResult() { @@ -232,8 +232,8 @@ public WatchRecord abortBeforeExecution(ExecutionState state, String message) { public WatchRecord abortFailedExecution(String message) { assert phase.sealed() == false; phase = ExecutionPhase.ABORTED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.MessageWatchRecord(this, result, message); } @@ -241,8 +241,8 @@ public WatchRecord abortFailedExecution(String message) { public WatchRecord abortFailedExecution(Exception e) { assert phase.sealed() == false; phase = ExecutionPhase.ABORTED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.ExceptionWatchRecord(this, result, e); } @@ -250,8 +250,8 @@ public WatchRecord abortFailedExecution(Exception e) { public WatchRecord finish() { assert phase.sealed() == false; phase = ExecutionPhase.FINISHED; - long executionTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); - WatchExecutionResult result = new WatchExecutionResult(this, executionTime); + long executionTimeNs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - relativeStartTime); + WatchExecutionResult result = new WatchExecutionResult(this, executionTimeNs); watch().status().setExecutionState(WatchRecord.getState(result)); return new WatchRecord.MessageWatchRecord(this, result); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java index d27a255dc0843..1ac2c7f57845d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transform/chain/ChainTransform.java @@ -151,13 +151,13 @@ public Builder(Transform... transforms) { add(transforms); } - public Builder add(Transform... transforms) { - Collections.addAll(this.transforms, transforms); + public Builder add(Transform... 
transformsToAdd) { + Collections.addAll(this.transforms, transformsToAdd); return this; } - public Builder add(Transform.Builder... transforms) { - for (Transform.Builder transform : transforms) { + public Builder add(Transform.Builder... transformsToAdd) { + for (Transform.Builder transform : transformsToAdd) { this.transforms.add(transform.build()); } return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java index 9fe1a41be2a2a..fbb273aa6a8bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/execute/ExecuteWatchRequest.java @@ -26,7 +26,7 @@ import java.util.Map; /** - * An execute watch request to execute a watch by id + * A request to execute a watch by id */ public class ExecuteWatchRequest extends ActionRequest { @@ -195,6 +195,7 @@ public XContentType getXContentType() { /** * @param watchSource instead of using an existing watch use this non persisted watch */ + @SuppressWarnings("HiddenField") public void setWatchSource(BytesReference watchSource, XContentType xContentType) { this.watchSource = watchSource; this.xContentType = xContentType; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java index 743f86a3bffa3..26151d4ae01c4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/Watch.java @@ -138,8 +138,8 @@ public boolean setState(boolean active, ZonedDateTime now) { * * @return {@code true} if the status of this watch changed, {@code false} otherwise. */ - public boolean ack(ZonedDateTime now, String... actions) { - return status.onAck(now, actions); + public boolean ack(ZonedDateTime now, String... actionIds) { + return status.onAck(now, actionIds); } public boolean acked(String actionId) {

From 1f933390c146021f22186d2861b199fc9eb5043e Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 29 Nov 2021 21:46:54 +0100
Subject: [PATCH 82/88] Less Verbose Serialization of Snapshot Failure in SLM Metadata (#80942)

We should not serialize the full exception, including cause(s) and stack traces, here. That string can run to multiple MB for a very large cluster in which a large subset of the indices/shards fails to snapshot. We can get the full details of what failed for each shard from the repository as well as from the logs anyway. With this change, if we fail to finalize the snapshot we still record the rough reason for the failure and can consult the logs for more details.
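To make the effect of the change concrete, below is a minimal, hypothetical caller of the exceptionToString helper that this patch consolidates into SnapshotLifecycleTask (the helper itself appears in the diff below). The example class name and failure message are illustrative, not part of the patch. Rendering through ElasticsearchException.generateThrowableXContent with ToXContent.EMPTY_PARAMS keeps the exception type and reason but omits stack traces, which are skipped by default; the removed code explicitly opted in to full stack traces and additionally attached every shard-level failure as a suppressed exception.

    // Illustrative sketch only: assumes the Elasticsearch server and the SLM
    // plugin classes changed by this patch are on the classpath.
    import org.elasticsearch.ElasticsearchException;
    import org.elasticsearch.xpack.slm.SnapshotLifecycleTask;

    public class SlmFailureSerializationExample {
        public static void main(String[] args) {
            // exceptionToString renders the exception with default XContent params,
            // so no stack traces and no per-shard suppressed failures are included.
            String details = SnapshotLifecycleTask.exceptionToString(
                new ElasticsearchException("failed to create snapshot successfully, 1 out of 3 total shards failed")
            );
            // Prints compact JSON along the lines of:
            // {"type":"exception","reason":"failed to create snapshot successfully, 1 out of 3 total shards failed"}
            System.out.println(details);
        }
    }

This mirrors the assertion change in SnapshotLifecycleTaskTests below, which stops expecting the shard-level "forced failure" message to appear in the recorded error details.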
--- .../elasticsearch/ElasticsearchException.java | 2 +- .../core/slm/SnapshotInvocationRecord.java | 8 ++-- .../xpack/slm/SnapshotLifecycleTask.java | 42 +++++++------------ .../slm/history/SnapshotHistoryItem.java | 22 ++-------- .../xpack/slm/SnapshotLifecycleTaskTests.java | 1 - 5 files changed, 23 insertions(+), 52 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1c2e3bc0764f2..8c599774d868e 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -58,7 +58,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte * to control if the {@code caused_by} element should render. Unlike most parameters to {@code toXContent} methods this parameter is * internal only and not available as a URL parameter. */ - private static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; + public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip"; /** * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} * to control if the {@code stack_trace} element should render. Unlike most parameters to {@code toXContent} methods this parameter is diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java index 6946d08ed1191..63d92abe9a100 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotInvocationRecord.java @@ -37,10 +37,10 @@ public class SnapshotInvocationRecord extends AbstractDiffable PARSER = new ConstructingObjectParser<>( "snapshot_policy_invocation_record", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index 1f08ca62e67cd..483fdcd167d89 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -20,13 +20,10 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.TimeValue; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.scheduler.SchedulerEngine; import org.elasticsearch.xpack.core.slm.SnapshotInvocationRecord; @@ -39,13 +36,10 @@ import java.io.IOException; import java.time.Instant; -import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; -import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; - public class SnapshotLifecycleTask implements SchedulerEngine.Listener { private static final Logger logger = LogManager.getLogger(SnapshotLifecycleTask.class); @@ -135,11 
+129,6 @@ public void onResponse(CreateSnapshotResponse createSnapshotResponse) { request.snapshot(), "failed to create snapshot successfully, " + failures + " out of " + total + " total shards failed" ); - // Add each failed shard's exception as suppressed, the exception contains - // information about which shard failed - // TODO: this seems wrong, investigate whether we actually need all the shard level exception here given that we - // could be dealing with tens of thousands of them at a time - snapInfo.shardFailures().forEach(e::addSuppressed); // Call the failure handler to register this as a failure and persist it onFailure(e); } @@ -194,13 +183,17 @@ static Optional getSnapPolicyMetadata(final Str ); } + public static String exceptionToString(Exception ex) { + return Strings.toString((builder, params) -> { + ElasticsearchException.generateThrowableXContent(builder, params, ex); + return builder; + }, ToXContent.EMPTY_PARAMS); + } + /** * A cluster state update task to write the result of a snapshot job to the cluster metadata for the associated policy. */ private static class WriteJobStatus extends ClusterStateUpdateTask { - private static final ToXContent.Params STACKTRACE_PARAMS = new ToXContent.MapParams( - Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false") - ); private final String policyName; private final String snapshotName; @@ -230,18 +223,6 @@ static WriteJobStatus failure(String policyId, String snapshotName, long timesta return new WriteJobStatus(policyId, snapshotName, timestamp, timestamp, Optional.of(exception)); } - private String exceptionToString() throws IOException { - if (exception.isPresent()) { - try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { - causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, STACKTRACE_PARAMS, exception.get()); - causeXContentBuilder.endObject(); - return BytesReference.bytes(causeXContentBuilder).utf8ToString(); - } - } - return null; - } - @Override public ClusterState execute(ClusterState currentState) throws Exception { SnapshotLifecycleMetadata snapMeta = currentState.metadata().custom(SnapshotLifecycleMetadata.TYPE); @@ -274,7 +255,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { if (exception.isPresent()) { stats.snapshotFailed(policyName); - newPolicyMetadata.setLastFailure(new SnapshotInvocationRecord(snapshotName, null, snapshotFinishTime, exceptionToString())); + newPolicyMetadata.setLastFailure( + new SnapshotInvocationRecord( + snapshotName, + null, + snapshotFinishTime, + exception.map(SnapshotLifecycleTask::exceptionToString).orElse(null) + ) + ); } else { stats.snapshotTaken(policyName); newPolicyMetadata.setLastSuccess(new SnapshotInvocationRecord(snapshotName, snapshotStartTime, snapshotFinishTime, null)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java index fd24e697818b5..d273ef63844b5 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryItem.java @@ -7,9 +7,7 @@ package org.elasticsearch.xpack.slm.history; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -19,16 +17,13 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; +import org.elasticsearch.xpack.slm.SnapshotLifecycleTask; import java.io.IOException; -import java.util.Collections; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE; - /** * Represents the record of a Snapshot Lifecycle Management action, so that it * can be indexed in a history index or recorded to a log in a structured way @@ -138,7 +133,7 @@ public static SnapshotHistoryItem creationFailureRecord( String snapshotName, Exception exception ) throws IOException { - String exceptionString = exceptionToString(exception); + String exceptionString = SnapshotLifecycleTask.exceptionToString(exception); return new SnapshotHistoryItem( timeStamp, policy.getId(), @@ -162,7 +157,7 @@ public static SnapshotHistoryItem deletionFailureRecord( String repository, Exception exception ) throws IOException { - String exceptionString = exceptionToString(exception); + String exceptionString = SnapshotLifecycleTask.exceptionToString(exception); return new SnapshotHistoryItem(timestamp, policyId, repository, snapshotName, DELETE_OPERATION, false, null, exceptionString); } @@ -273,15 +268,4 @@ public String toString() { return Strings.toString(this); } - private static String exceptionToString(Exception exception) throws IOException { - Params stacktraceParams = new MapParams(Collections.singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false")); - String exceptionString; - try (XContentBuilder causeXContentBuilder = JsonXContent.contentBuilder()) { - causeXContentBuilder.startObject(); - ElasticsearchException.generateThrowableXContent(causeXContentBuilder, stacktraceParams, exception); - causeXContentBuilder.endObject(); - exceptionString = BytesReference.bytes(causeXContentBuilder).utf8ToString(); - } - return exceptionString; - } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index e830f87a7773f..83a94020f95bd 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -288,7 +288,6 @@ public void testPartialFailureSnapshot() throws Exception { item.getErrorDetails(), containsString("failed to create snapshot successfully, 1 out of 3 total shards failed") ); - assertThat(item.getErrorDetails(), containsString("forced failure")); }); SnapshotLifecycleTask task = new SnapshotLifecycleTask(client, clusterService, historyStore);

From 537f371f348207e7c59d28162b61c3720e931d0c Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Mon, 29 Nov 2021 23:49:27 +0200
Subject: [PATCH 83/88] URL option for BaseRunAsSuperuserCommand (#81025)

Add a --url option for the elasticsearch-reset-password and elasticsearch-create-enrollment-token CLI tools (and any tools that extend BaseRunAsSuperuserCommand).
The tools use CommandLineHttpClient internally, which tries its best to deduce the URL of the local node based on the configuration, but there are certain cases where it either fails or returns an unwanted result. Concretely:

- CommandLineHttpClient#getDefaultURL always returns a URL with the port set to 9200, unless a port is explicitly set in the configuration. When running multiple nodes on the same host, subsequent nodes get sequential port numbers after 9200 by default, which means that the CLI tool always connects to the first of the n nodes on a given host. Since these tools depend on a local file realm user, requests to the other nodes would fail.
- When an ES node binds and listens on many addresses, it can be the case that not all of the IP addresses are added as SANs in the certificate that is used for TLS on the HTTP layer. CommandLineHttpClient#getDefaultURL picks an address based on a preference order, but that address might not be in the SANs, and thus all requests to the node would fail hostname verification.

Manually setting `--url` to an appropriate value allows users to overcome these edge cases.
--- .../commands/create-enrollment-token.asciidoc | 20 ++++++-- .../commands/reset-password.asciidoc | 17 ++++++- .../esnative/tool/ResetPasswordTool.java | 7 +-- .../ExternalEnrollmentTokenGenerator.java | 34 ++++++------- .../tool/CreateEnrollmentTokenTool.java | 11 +++- .../tool/BaseRunAsSuperuserCommand.java | 11 ++-- ...ExternalEnrollmentTokenGeneratorTests.java | 50 ++++++++++--------- .../esnative/tool/ResetPasswordToolTests.java | 33 ++++++++++++ .../tool/CreateEnrollmentTokenToolTests.java | 42 ++++++++++++++-- 9 files changed, 164 insertions(+), 61 deletions(-) diff --git a/docs/reference/commands/create-enrollment-token.asciidoc b/docs/reference/commands/create-enrollment-token.asciidoc index 4fd95f1b7bef4..ca95649324702 100644 --- a/docs/reference/commands/create-enrollment-token.asciidoc +++ b/docs/reference/commands/create-enrollment-token.asciidoc @@ -12,7 +12,7 @@ The `elasticsearch-create-enrollment-token` command creates enrollment tokens fo [source,shell] ---- bin/elasticsearch-create-enrollment-token -[-f, --force] [-h, --help] [-E ] [-s, --scope] +[-f, --force] [-h, --help] [-E ] [-s, --scope] [--url] ---- [discrete] @@ -23,7 +23,7 @@ Use this command to create enrollment tokens, which you can use to enroll new with an existing {es} cluster that has security features enabled. The command generates (and subsequently removes) a temporary user in the <> to run the request that creates enrollment tokens. -IMPORTANT: You cannot use this tool if the file realm is disabled in your +IMPORTANT: You cannot use this tool if the file realm is disabled in your `elasticsearch.yml` file. This command uses an HTTP connection to connect to the cluster and run the user @@ -42,12 +42,17 @@ option. For more information about debugging connection failures, see `-E `:: Configures a standard {es} or {xpack} setting. -`-f, --force`:: Forces the command to run against an unhealthy cluster. +`-f, --force`:: Forces the command to run against an unhealthy cluster. `-h, --help`:: Returns all of the command parameters. `-s, --scope`:: Specifies the scope of the generated token. Supported values are `node` and `kibana`. +`--url`:: Specifies the base URL (hostname and port of the local node) that the tool uses to submit API +requests to {es}. The default value is determined from the settings in your +`elasticsearch.yml` file.
If `xpack.security.http.ssl.enabled` is set to `true`, +you must specify an HTTPS URL. + [discrete] === Examples @@ -57,3 +62,12 @@ The following command creates an enrollment token for enrolling an {es} node int ---- bin/elasticsearch-create-enrollment-token -s node ---- + +The following command creates an enrollment token for enrolling a {kib} instance into a cluster. +The specified URL indicates where the elasticsearch-create-enrollment-token tool attempts to reach the +local {es} node: + +[source,shell] +---- +bin/elasticsearch-create-enrollment-token -s kibana --url "https://172.0.0.3:9200" +---- diff --git a/docs/reference/commands/reset-password.asciidoc b/docs/reference/commands/reset-password.asciidoc index 012874fd61171..b8823158d0d0f 100644 --- a/docs/reference/commands/reset-password.asciidoc +++ b/docs/reference/commands/reset-password.asciidoc @@ -14,7 +14,7 @@ the native realm and built-in users. bin/elasticsearch-reset-password [-a, --auto] [-b, --batch] [-E , String> httpInfo = getNodeInfo(user, password); + final String apiKey = getApiKeyCredentials(user, password, action, baseUrl); + final Tuple, String> httpInfo = getNodeInfo(user, password, baseUrl); return new EnrollmentToken(apiKey, fingerprint, httpInfo.v2(), httpInfo.v1()); } @@ -89,12 +87,12 @@ private HttpResponse.HttpResponseBuilder responseBuilder(InputStream is) throws return httpResponseBuilder; } - protected URL createAPIKeyUrl() throws MalformedURLException, URISyntaxException { - return new URL(defaultUrl, (defaultUrl.toURI().getPath() + "/_security/api_key").replaceAll("/+", "/")); + protected URL createAPIKeyUrl(URL baseUrl) throws MalformedURLException, URISyntaxException { + return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_security/api_key").replaceAll("/+", "/")); } - protected URL getHttpInfoUrl() throws MalformedURLException, URISyntaxException { - return new URL(defaultUrl, (defaultUrl.toURI().getPath() + "/_nodes/_local/http").replaceAll("/+", "/")); + protected URL getHttpInfoUrl(URL baseUrl) throws MalformedURLException, URISyntaxException { + return new URL(baseUrl, (baseUrl.toURI().getPath() + "/_nodes/_local/http").replaceAll("/+", "/")); } @SuppressWarnings("unchecked") @@ -114,7 +112,7 @@ static String getVersion(Map nodesInfo) { return nodeInfo.get("version").toString(); } - protected String getApiKeyCredentials(String user, SecureString password, String action) throws Exception { + protected String getApiKeyCredentials(String user, SecureString password, String action, URL baseUrl) throws Exception { final CheckedSupplier createApiKeyRequestBodySupplier = () -> { XContentBuilder xContentBuilder = JsonXContent.contentBuilder(); xContentBuilder.startObject() @@ -129,7 +127,7 @@ protected String getApiKeyCredentials(String user, SecureString password, String return Strings.toString(xContentBuilder); }; - final URL createApiKeyUrl = createAPIKeyUrl(); + final URL createApiKeyUrl = createAPIKeyUrl(baseUrl); final HttpResponse httpResponseApiKey = client.execute( "POST", createApiKeyUrl, @@ -155,8 +153,8 @@ protected String getApiKeyCredentials(String user, SecureString password, String return apiId + ":" + apiKey; } - protected Tuple, String> getNodeInfo(String user, SecureString password) throws Exception { - final URL httpInfoUrl = getHttpInfoUrl(); + protected Tuple, String> getNodeInfo(String user, SecureString password, URL baseUrl) throws Exception { + final URL httpInfoUrl = getHttpInfoUrl(baseUrl); final HttpResponse httpResponseHttp = client.execute("GET", httpInfoUrl, user, 
password, () -> null, is -> responseBuilder(is)); final int httpCode = httpResponseHttp.getHttpStatus(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java index 84c6ccf4964ea..954badc86e47a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenTool.java @@ -22,16 +22,19 @@ import org.elasticsearch.xpack.security.enrollment.ExternalEnrollmentTokenGenerator; import org.elasticsearch.xpack.security.tool.BaseRunAsSuperuserCommand; +import java.net.URL; import java.util.List; import java.util.function.Function; public class CreateEnrollmentTokenTool extends BaseRunAsSuperuserCommand { private final OptionSpec scope; + private final Function clientFunction; private final CheckedFunction createEnrollmentTokenFunction; static final List ALLOWED_SCOPES = List.of("node", "kibana"); CreateEnrollmentTokenTool() { + this( environment -> new CommandLineHttpClient(environment), environment -> KeyStoreWrapper.load(environment.configFile()), @@ -46,6 +49,7 @@ public class CreateEnrollmentTokenTool extends BaseRunAsSuperuserCommand { ) { super(clientFunction, keyStoreFunction, "Creates enrollment tokens for elasticsearch nodes and kibana instances"); this.createEnrollmentTokenFunction = createEnrollmentTokenFunction; + this.clientFunction = clientFunction; scope = parser.acceptsAll(List.of("scope", "s"), "The scope of this enrollment token, can be either \"node\" or \"kibana\"") .withRequiredArg() .required(); @@ -74,12 +78,15 @@ protected void validate(Terminal terminal, OptionSet options, Environment env) t protected void executeCommand(Terminal terminal, OptionSet options, Environment env, String username, SecureString password) throws Exception { final String tokenScope = scope.value(options); + final URL baseUrl = options.has(urlOption) + ? 
new URL(options.valueOf(urlOption)) + : new URL(clientFunction.apply(env).getDefaultURL()); try { ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = createEnrollmentTokenFunction.apply(env); if (tokenScope.equals("node")) { - terminal.println(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(username, password).getEncoded()); + terminal.println(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(username, password, baseUrl).getEncoded()); } else { - terminal.println(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(username, password).getEncoded()); + terminal.println(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(username, password, baseUrl).getEncoded()); } } catch (Exception e) { terminal.errorPrintln("Unable to create enrollment token for scope [" + tokenScope + "]"); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java index 6909da4df03bb..c0ed18d0dc6b9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/tool/BaseRunAsSuperuserCommand.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security.tool; import joptsimple.OptionSet; +import joptsimple.OptionSpec; import joptsimple.OptionSpecBuilder; import org.elasticsearch.Version; @@ -57,6 +58,7 @@ public abstract class BaseRunAsSuperuserCommand extends KeyStoreAwareCommand { private static final int PASSWORD_LENGTH = 14; private final OptionSpecBuilder force; + protected final OptionSpec urlOption; private final Function clientFunction; private final CheckedFunction keyStoreFunction; @@ -72,6 +74,7 @@ public BaseRunAsSuperuserCommand( List.of("f", "force"), "Use this option to force execution of the command against a cluster that is currently unhealthy." ); + urlOption = parser.accepts("url", "the URL where the elasticsearch node listens for connections.").withRequiredArg(); } @Override @@ -120,7 +123,7 @@ protected final void execute(Terminal terminal, OptionSet options, Environment e attributesChecker.check(terminal); final boolean forceExecution = options.has(force); - checkClusterHealthWithRetries(newEnv, terminal, username, password, 5, forceExecution); + checkClusterHealthWithRetries(newEnv, options, terminal, username, password, 5, forceExecution); executeCommand(terminal, options, newEnv, username, password); } catch (Exception e) { int exitCode; @@ -195,6 +198,7 @@ private void ensureFileRealmEnabled(Settings settings) throws Exception { */ private void checkClusterHealthWithRetries( Environment env, + OptionSet options, Terminal terminal, String username, SecureString password, @@ -202,7 +206,8 @@ private void checkClusterHealthWithRetries( boolean force ) throws Exception { CommandLineHttpClient client = clientFunction.apply(env); - final URL clusterHealthUrl = CommandLineHttpClient.createURL(new URL(client.getDefaultURL()), "_cluster/health", "?pretty"); + final URL baseUrl = options.has(urlOption) ? 
new URL(options.valueOf(urlOption)) : new URL(client.getDefaultURL()); + final URL clusterHealthUrl = CommandLineHttpClient.createURL(baseUrl, "_cluster/health", "?pretty"); final HttpResponse response; try { response = client.execute("GET", clusterHealthUrl, username, password, () -> null, CommandLineHttpClient::responseBuilder); @@ -225,7 +230,7 @@ private void checkClusterHealthWithRetries( ); Thread.sleep(1000); retries -= 1; - checkClusterHealthWithRetries(env, terminal, username, password, retries, force); + checkClusterHealthWithRetries(env, options, terminal, username, password, retries, force); } else { throw new UserException( ExitCodes.DATA_ERROR, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java index 63824c270e7c9..339b3de9ecb49 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/enrollment/ExternalEnrollmentTokenGeneratorTests.java @@ -83,10 +83,10 @@ public void setupMocks() throws Exception { public void testCreateSuccess() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when(client.execute(anyString(), any(URL.class), anyString(), any(SecureString.class), anyCheckedSupplier(), anyCheckedFunction())) @@ -147,7 +147,8 @@ public void testCreateSuccess() throws Exception { final String tokenNode = externalEnrollmentTokenGenerator.createNodeEnrollmentToken( "elastic", - new SecureString("elastic".toCharArray()) + new SecureString("elastic".toCharArray()), + baseURL ).getEncoded(); Map infoNode = getDecoded(tokenNode); @@ -158,7 +159,8 @@ public void testCreateSuccess() throws Exception { final String tokenKibana = externalEnrollmentTokenGenerator.createKibanaEnrollmentToken( "elastic", - new SecureString("elastic".toCharArray()) + new SecureString("elastic".toCharArray()), + baseURL ).getEncoded(); Map infoKibana = getDecoded(tokenKibana); @@ -170,9 +172,9 @@ public void testCreateSuccess() throws Exception { public void testFailedCreateApiKey() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); final HttpResponse 
httpResponseNotOK = new HttpResponse(HttpURLConnection.HTTP_BAD_REQUEST, new HashMap<>()); when( @@ -188,7 +190,7 @@ public void testFailedCreateApiKey() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat(ex.getMessage(), Matchers.containsString("Unexpected response code [400] from calling POST ")); @@ -196,10 +198,10 @@ public void testFailedCreateApiKey() throws Exception { public void testFailedRetrieveHttpInfo() throws Exception { final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -248,7 +250,7 @@ public void testFailedRetrieveHttpInfo() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat(ex.getMessage(), Matchers.containsString("Unexpected response code [400] from calling GET ")); @@ -274,10 +276,10 @@ public void testFailedNoCaInKeystore() throws Exception { .build(); environment = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -326,7 +328,7 @@ public void testFailedNoCaInKeystore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -358,10 +360,10 @@ public void testFailedManyCaInKeystore() throws Exception { .build(); environment = new Environment(settings, tempDir); final 
CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator(environment, client); - final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(); - final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(); + final URL baseURL = new URL("http://localhost:9200"); + final URL createAPIKeyURL = externalEnrollmentTokenGenerator.createAPIKeyUrl(baseURL); + final URL getHttpInfoURL = externalEnrollmentTokenGenerator.getHttpInfoUrl(baseURL); final HttpResponse httpResponseOK = new HttpResponse(HttpURLConnection.HTTP_OK, new HashMap<>()); when( @@ -410,7 +412,7 @@ public void testFailedManyCaInKeystore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -431,7 +433,7 @@ public void testNoKeyStore() throws Exception { .build(); final Environment environment_no_keystore = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator( environment_no_keystore, client @@ -439,7 +441,7 @@ public void testNoKeyStore() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( @@ -467,7 +469,7 @@ public void testEnrollmentNotEnabled() throws Exception { .build(); final Environment environment_not_enabled = new Environment(settings, tempDir); final CommandLineHttpClient client = mock(CommandLineHttpClient.class); - when(client.getDefaultURL()).thenReturn("http://localhost:9200"); + final URL baseURL = new URL("http://localhost:9200"); final ExternalEnrollmentTokenGenerator externalEnrollmentTokenGenerator = new ExternalEnrollmentTokenGenerator( environment_not_enabled, client @@ -475,7 +477,7 @@ public void testEnrollmentNotEnabled() throws Exception { IllegalStateException ex = expectThrows( IllegalStateException.class, - () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray())) + () -> externalEnrollmentTokenGenerator.createNodeEnrollmentToken("elastic", new SecureString("elastic".toCharArray()), baseURL) .getEncoded() ); assertThat( diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java index 330df329b4ded..943b2770172a5 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java +++ 
b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/ResetPasswordToolTests.java @@ -152,6 +152,39 @@ public void testSuccessInteractiveMode() throws Exception { assertThat(output, containsString("Password for the [" + user + "] user successfully reset.")); } + public void testUserCanPassUrlParameter() throws Exception { + URL url = new URL("http://localhost:9204"); + HttpResponse healthResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of("status", randomFrom("yellow", "green"))); + when( + client.execute( + anyString(), + eq(clusterHealthUrl(url)), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + ).thenReturn(healthResponse); + HttpResponse changePasswordResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of()); + when( + client.execute( + anyString(), + eq(changePasswordUrl(url, user)), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + ).thenReturn(changePasswordResponse); + terminal.addTextInput("y"); + execute(randomFrom("-u", "--username"), user, "--url", "http://localhost:9204"); + String output = terminal.getOutput(); + assertThat(output, containsString("This tool will reset the password of the [" + user + "] user to an autogenerated value.")); + assertThat(output, containsString("The password will be printed in the console.")); + assertThat(output, containsString("Password for the [" + user + "] user successfully reset.")); + assertThat(output, containsString("New value:")); + } + public void testUserCancelledAutoMode() throws Exception { terminal.addTextInput("n"); UserException e = expectThrows(UserException.class, () -> execute(randomFrom("-u", "--username"), user)); diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java index 16c7120ab3a76..d322ae2cfdc5d 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/enrollment/tool/CreateEnrollmentTokenToolTests.java @@ -134,8 +134,12 @@ public void setup() throws Exception { "8.0.0", Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") ); - when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class))).thenReturn(kibanaToken); - when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class))).thenReturn(nodeToken); + when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenReturn( + kibanaToken + ); + when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenReturn( + nodeToken + ); } @AfterClass @@ -167,6 +171,36 @@ public void testInvalidScope() throws Exception { ); } + public void testUserCanPassUrl() throws Exception { + HttpResponse healthResponse = new HttpResponse(HttpURLConnection.HTTP_OK, Map.of("status", randomFrom("yellow", "green"))); + when( + client.execute( + anyString(), + eq(clusterHealthUrl(new URL("http://localhost:9204"))), + anyString(), + any(SecureString.class), + any(CheckedSupplier.class), + any(CheckedFunction.class) + ) + 
).thenReturn(healthResponse); + EnrollmentToken kibanaToken = new EnrollmentToken( + "DR6CzXkBDf8amV_48yYX:x3YqU_rqQwm-ESrkExcnOg", + "ce480d53728605674fcfd8ffb51000d8a33bf32de7c7f1e26b4d428f8a91362d", + "8.0.0", + Arrays.asList("[192.168.0.1:9201, 172.16.254.1:9202") + ); + when( + externalEnrollmentTokenGenerator.createKibanaEnrollmentToken( + anyString(), + any(SecureString.class), + eq(new URL("http://localhost:9204")) + ) + ).thenReturn(kibanaToken); + String output = execute("--scope", "kibana", "--url", "http://localhost:9204"); + assertThat(output, containsString("1WXzQ4eVlYOngzWXFVX3JxUXdtLUVTcmtFeGNuT2cifQ==")); + + } + public void testUnhealthyCluster() throws Exception { String scope = randomBoolean() ? "node" : "kibana"; URL url = new URL(client.getDefaultURL()); @@ -207,10 +241,10 @@ public void testEnrollmentDisabled() { public void testUnableToCreateToken() throws Exception { this.externalEnrollmentTokenGenerator = mock(ExternalEnrollmentTokenGenerator.class); - when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class))).thenThrow( + when(externalEnrollmentTokenGenerator.createKibanaEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenThrow( new IllegalStateException("example exception message") ); - when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class))).thenThrow( + when(externalEnrollmentTokenGenerator.createNodeEnrollmentToken(anyString(), any(SecureString.class), any(URL.class))).thenThrow( new IllegalStateException("example exception message") ); String scope = randomBoolean() ? "node" : "kibana"; From 806abee75a7c02eb438242bd51d72b236730bf53 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 30 Nov 2021 10:12:27 +1100 Subject: [PATCH 84/88] Optimize DLS bitset building for matchAll query (#81030) The PR avoids creating a Weight and Scorer and stepping through the doc iterator when building the DLS bitSet for an effective matchAll query. Instead, it returns a MatchAllRoleBitSet directly after the query is rewritten in this scenario.
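In essence the change adds a fast path along these lines (simplified sketch; the authoritative code is in the diff below):

```
// Sketch: short-circuit before building a Weight/Scorer.
final Query rewrittenQuery = searcher.rewrite(query);
if (isEffectiveMatchAllDocsQuery(rewrittenQuery)) {
    // Every document matches, so skip the Scorer/DocIdSetIterator walk entirely.
    return new MatchAllRoleBitSet(context.reader().maxDoc());
}
// ...otherwise fall through to the existing Weight/Scorer based path.
```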
Resolves: #80904 --- .../DocumentSubsetBitsetCache.java | 19 ++++++++++++++++++- .../DocumentSubsetBitsetCacheTests.java | 10 ++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java index 1f0d9ca8bca8d..070f67a626c46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCache.java @@ -13,8 +13,10 @@ import org.apache.lucene.index.IndexReaderContext; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -273,7 +275,11 @@ private BitSet computeBitSet(Query query, LeafReaderContext context) throws IOEx final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context); final IndexSearcher searcher = new IndexSearcher(topLevelContext); searcher.setQueryCache(null); - final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f); + final Query rewrittenQuery = searcher.rewrite(query); + if (isEffectiveMatchAllDocsQuery(rewrittenQuery)) { + return new MatchAllRoleBitSet(context.reader().maxDoc()); + } + final Weight weight = searcher.createWeight(rewrittenQuery, ScoreMode.COMPLETE_NO_SCORES, 1f); final Scorer s = weight.scorer(context); if (s == null) { return null; @@ -282,6 +288,17 @@ private BitSet computeBitSet(Query query, LeafReaderContext context) throws IOEx } } + // Package private for testing + static boolean isEffectiveMatchAllDocsQuery(Query rewrittenQuery) { + if (rewrittenQuery instanceof ConstantScoreQuery && ((ConstantScoreQuery) rewrittenQuery).getQuery() instanceof MatchAllDocsQuery) { + return true; + } + if (rewrittenQuery instanceof MatchAllDocsQuery) { + return true; + } + return false; + } + private void maybeLogCacheFullWarning() { final long nextLogTime = cacheFullWarningTime.get(); final long now = System.currentTimeMillis(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java index ac99e492088a2..286d1785b5aad 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/DocumentSubsetBitsetCacheTests.java @@ -19,9 +19,13 @@ import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; import 
org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSetIterator; @@ -515,6 +519,12 @@ public void testMatchAllRoleBitSet() throws Exception { } } + public void testEquivalentMatchAllDocsQuery() { + assertTrue(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new MatchAllDocsQuery())); + assertTrue(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new ConstantScoreQuery(new MatchAllDocsQuery()))); + assertFalse(DocumentSubsetBitsetCache.isEffectiveMatchAllDocsQuery(new TermQuery(new Term("term")))); + } + private void runTestOnIndex(CheckedBiConsumer body) throws Exception { runTestOnIndices(1, ctx -> { final TestIndexContext indexContext = ctx.get(0); From 2629c32efd53809f1792f093567e19cfcf44bf29 Mon Sep 17 00:00:00 2001 From: weizijun Date: Tue, 30 Nov 2021 07:24:51 +0800 Subject: [PATCH 85/88] Fix ComposableIndexTemplate equals when composed_of is null (#80864) When composed_of is null, the ComposableIndexTemplate will return an empty list. This causes the input and output ComposableIndexTemplate instances to compare as unequal.

To reproduce: in the ComposableIndexTemplateTests.randomInstance method, set `List componentTemplates = null;` and the following tests fail:

```
Tests with failures:
 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testAddIndexTemplateV2
 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testRemoveMultipleIndexTemplateV2Wildcards
 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testUpdateIndexTemplateV2
 - org.elasticsearch.cluster.metadata.MetadataIndexTemplateServiceTests.testRemoveMultipleIndexTemplateV2
```

The PR adds a `componentTemplatesEquals` method that treats a null and an empty list as equal.
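The intended equality semantics look roughly like this (sketch mirroring the helper added in the diff below):

```
// Sketch: composed_of may be null or empty; both mean "no component templates".
static boolean componentTemplatesEquals(List<String> c1, List<String> c2) {
    if (Objects.equals(c1, c2)) {
        return true; // both null, or equal lists
    }
    return (c1 == null && c2.isEmpty()) || (c2 == null && c1.isEmpty());
}
```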
--- .../cluster/metadata/ComposableIndexTemplate.java | 15 ++++++++++++++- .../metadata/ComposableIndexTemplateTests.java | 14 ++++++++++++-- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index a7d1f8f9580bb..f7f5f84ab93b4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -275,7 +275,7 @@ public boolean equals(Object obj) { ComposableIndexTemplate other = (ComposableIndexTemplate) obj; return Objects.equals(this.indexPatterns, other.indexPatterns) && Objects.equals(this.template, other.template) - && Objects.equals(this.componentTemplates, other.componentTemplates) + && componentTemplatesEquals(this.componentTemplates, other.componentTemplates) && Objects.equals(this.priority, other.priority) && Objects.equals(this.version, other.version) && Objects.equals(this.metadata, other.metadata) @@ -283,6 +283,19 @@ public boolean equals(Object obj) { && Objects.equals(this.allowAutoCreate, other.allowAutoCreate); } + static boolean componentTemplatesEquals(List c1, List c2) { + if (Objects.equals(c1, c2)) { + return true; + } + if (c1 == null && c2.isEmpty()) { + return true; + } + if (c2 == null && c1.isEmpty()) { + return true; + } + return false; + } + @Override public String toString() { return Strings.toString(this); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java index 090d36e3cec26..fed508df0ba41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateTests.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class ComposableIndexTemplateTests extends AbstractDiffableSerializationTestCase { @Override protected ComposableIndexTemplate makeTestChanges(ComposableIndexTemplate testInstance) { @@ -79,11 +81,10 @@ public static ComposableIndexTemplate randomInstance() { } List indexPatterns = randomList(1, 4, () -> randomAlphaOfLength(4)); - List componentTemplates = randomList(0, 10, () -> randomAlphaOfLength(5)); return new ComposableIndexTemplate( indexPatterns, template, - componentTemplates, + randomBoolean() ? null : randomList(0, 10, () -> randomAlphaOfLength(5)), randomBoolean() ? null : randomNonNegativeLong(), randomBoolean() ? 
null : randomNonNegativeLong(), meta, @@ -242,4 +243,13 @@ public static ComposableIndexTemplate mutateTemplate(ComposableIndexTemplate ori throw new IllegalStateException("illegal randomization branch"); } } + + public void testComponentTemplatesEquals() { + assertThat(ComposableIndexTemplate.componentTemplatesEquals(null, null), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(null, List.of()), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), null), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), List.of()), equalTo(true)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(randomAlphaOfLength(5)), List.of()), equalTo(false)); + assertThat(ComposableIndexTemplate.componentTemplatesEquals(List.of(), List.of(randomAlphaOfLength(5))), equalTo(false)); + } } From e20fe6d6390b46d21a09cbdd63f75753cd8d1b0d Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 30 Nov 2021 09:30:05 +0100 Subject: [PATCH 86/88] Add replicated field to get data stream api response. (#80988) Internally we already kept track of whether a data stream is replicated by CCR. It is part of the `DataStream` class. This just adds it to the xcontent serialization of the get data stream api response class. Relates to elastic/kibana#118899 --- .../client/indices/DataStream.java | 21 +++++++++++++++---- .../change-mappings-and-settings.asciidoc | 3 ++- .../indices/get-data-stream.asciidoc | 15 +++++++++++-- .../core/action/GetDataStreamAction.java | 2 ++ .../test/data_stream/10_basic.yml | 6 ++++-- 5 files changed, 38 insertions(+), 9 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DataStream.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DataStream.java index 047b13b0d2ef4..78450a58d50a6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DataStream.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/DataStream.java @@ -35,6 +35,7 @@ public final class DataStream { @Nullable private final Map metadata; private final boolean allowCustomRouting; + private final boolean replicated; public DataStream( String name, @@ -47,7 +48,8 @@ public DataStream( @Nullable Map metadata, boolean hidden, boolean system, - boolean allowCustomRouting + boolean allowCustomRouting, + boolean replicated ) { this.name = name; this.timeStampField = timeStampField; @@ -60,6 +62,7 @@ public DataStream( this.hidden = hidden; this.system = system; this.allowCustomRouting = allowCustomRouting; + this.replicated = replicated; } public String getName() { @@ -106,6 +109,10 @@ public boolean allowsCustomRouting() { return allowCustomRouting; } + public boolean isReplicated() { + return replicated; + } + public static final ParseField NAME_FIELD = new ParseField("name"); public static final ParseField TIMESTAMP_FIELD_FIELD = new ParseField("timestamp_field"); public static final ParseField INDICES_FIELD = new ParseField("indices"); @@ -117,6 +124,7 @@ public boolean allowsCustomRouting() { public static final ParseField HIDDEN_FIELD = new ParseField("hidden"); public static final ParseField SYSTEM_FIELD = new ParseField("system"); public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + public static final ParseField REPLICATED = new ParseField("replicated"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new 
ConstructingObjectParser<>("data_stream", args -> { @@ -132,6 +140,7 @@ public boolean allowsCustomRouting() { boolean hidden = args[8] != null && (boolean) args[8]; boolean system = args[9] != null && (boolean) args[9]; boolean allowCustomRouting = args[10] != null && (boolean) args[10]; + boolean replicated = args[11] != null && (boolean) args[11]; return new DataStream( dataStreamName, timeStampField, @@ -143,7 +152,8 @@ public boolean allowsCustomRouting() { metadata, hidden, system, - allowCustomRouting + allowCustomRouting, + replicated ); }); @@ -159,6 +169,7 @@ public boolean allowsCustomRouting() { PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN_FIELD); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), SYSTEM_FIELD); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING); + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), REPLICATED); } public static DataStream fromXContent(XContentParser parser) throws IOException { @@ -180,7 +191,8 @@ public boolean equals(Object o) { && Objects.equals(indexTemplate, that.indexTemplate) && Objects.equals(ilmPolicyName, that.ilmPolicyName) && Objects.equals(metadata, that.metadata) - && allowCustomRouting == that.allowCustomRouting; + && allowCustomRouting == that.allowCustomRouting + && replicated == that.replicated; } @Override @@ -196,7 +208,8 @@ public int hashCode() { metadata, hidden, system, - allowCustomRouting + allowCustomRouting, + replicated ); } } diff --git a/docs/reference/data-streams/change-mappings-and-settings.asciidoc b/docs/reference/data-streams/change-mappings-and-settings.asciidoc index 4a74cae5f8bb8..40ab1618c947b 100644 --- a/docs/reference/data-streams/change-mappings-and-settings.asciidoc +++ b/docs/reference/data-streams/change-mappings-and-settings.asciidoc @@ -578,7 +578,8 @@ stream's oldest backing index. "template": "my-data-stream-template", "hidden": false, "system": false, - "allow_custom_routing": false + "allow_custom_routing": false, + "replicated": false } ] } diff --git a/docs/reference/indices/get-data-stream.asciidoc b/docs/reference/indices/get-data-stream.asciidoc index 33bfab859a8ce..0a1abdc2cd669 100644 --- a/docs/reference/indices/get-data-stream.asciidoc +++ b/docs/reference/indices/get-data-stream.asciidoc @@ -207,6 +207,15 @@ use the <>. (Boolean) If `true`, the data stream is created and managed by an Elastic stack component and cannot be modified through normal user interaction. + +`allow_custom_routing`:: +(Boolean) +If `true`, the data stream allows custom routing on write requests. + +`replicated`:: +(Boolean) +If `true`, the data stream is created and managed by {ccr} and the local +cluster cannot write into this data stream or change its mappings.
==== [[get-data-stream-api-example]] @@ -247,7 +256,8 @@ The API returns the following response: "ilm_policy": "my-lifecycle-policy", "hidden": false, "system": false, - "allow_custom_routing": false + "allow_custom_routing": false, + "replicated": false }, { "name": "my-data-stream-two", @@ -269,7 +279,8 @@ The API returns the following response: "ilm_policy": "my-lifecycle-policy", "hidden": false, "system": false, - "allow_custom_routing": false + "allow_custom_routing": false, + "replicated": false } ] } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/GetDataStreamAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/GetDataStreamAction.java index 840ffd730b8e1..4bfeb28ef999c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/GetDataStreamAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/GetDataStreamAction.java @@ -120,6 +120,7 @@ public static class DataStreamInfo extends AbstractDiffable impl public static final ParseField HIDDEN_FIELD = new ParseField("hidden"); public static final ParseField SYSTEM_FIELD = new ParseField("system"); public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing"); + public static final ParseField REPLICATED = new ParseField("replicated"); DataStream dataStream; ClusterHealthStatus dataStreamStatus; @@ -190,6 +191,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(HIDDEN_FIELD.getPreferredName(), dataStream.isHidden()); builder.field(SYSTEM_FIELD.getPreferredName(), dataStream.isSystem()); builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), dataStream.isAllowCustomRouting()); + builder.field(REPLICATED.getPreferredName(), dataStream.isReplicated()); builder.endObject(); return builder; } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml index 7f243c057b01a..2414f48883871 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/data_stream/10_basic.yml @@ -54,13 +54,15 @@ setup: - match: { data_streams.0.status: 'GREEN' } - match: { data_streams.0.template: 'my-template1' } - match: { data_streams.0.hidden: false } + - match: { data_streams.0.replicated: false } - match: { data_streams.1.name: simple-data-stream2 } - match: { data_streams.1.timestamp_field.name: '@timestamp' } - - match: { data_streams.0.generation: 1 } + - match: { data_streams.1.generation: 1 } - length: { data_streams.1.indices: 1 } - match: { data_streams.1.indices.0.index_name: '/\.ds-simple-data-stream2-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { data_streams.1.template: 'my-template2' } - - match: { data_streams.0.hidden: false } + - match: { data_streams.1.hidden: false } + - match: { data_streams.1.replicated: false } # save the backing index names for later use - set: { data_streams.0.indices.0.index_name: idx0name } From 5c4330292ffead1218ddba896431ca04408f766c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 30 Nov 2021 09:31:26 +0100 Subject: [PATCH 87/88] Fix data stream alias validation. (#81040) In case of restoring a snapshot, it is possible to overwrite an existing data stream with a data stream alias from a snapshot. This change fixes this by improving the generic duplicate name validation. 
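The kind of conflict that is now rejected at metadata build time looks like this (illustrative sketch based on the new tests in this change; `addDataStream` is the test helper used there):

```
// Sketch: a data stream alias can no longer share a name with a data stream.
Metadata.Builder b = Metadata.builder();
addDataStream("my-alias", b);              // register a data stream named "my-alias"
b.put("my-alias", "my-alias", null, null); // register an alias with the same name
Exception e = expectThrows(IllegalStateException.class, b::build);
// e.getMessage() contains: "data stream alias and data stream have the same name (my-alias)"
```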
On top of this, the lack of data stream alias validation in the Metadata.Builder#build() method resulted in cases where data stream aliases could be added for existing index aliases, data streams, or indices with the same name. Closes #80972 --- .../cluster/metadata/Metadata.java | 19 ++++++++++++- .../cluster/metadata/MetadataTests.java | 75 +++++++ .../datastreams/DataStreamIT.java | 184 +++++++++++++++++- .../datastreams/DataStreamsSnapshotsIT.java | 31 +++ 4 files changed, 305 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 531f85d981827..3835772d5bcac 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -1587,17 +1587,23 @@ public Metadata build(boolean builtIndicesLookupEagerly) { indexMetadata.getAliases().keysIt().forEachRemaining(allAliases::add); } + final ArrayList duplicates = new ArrayList<>(); final Set allDataStreams = new HashSet<>(); DataStreamMetadata dataStreamMetadata = (DataStreamMetadata) this.customs.get(DataStreamMetadata.TYPE); if (dataStreamMetadata != null) { for (DataStream dataStream : dataStreamMetadata.dataStreams().values()) { allDataStreams.add(dataStream.getName()); } + // Adding data stream aliases: + for (String dataStreamAlias : dataStreamMetadata.getDataStreamAliases().keySet()) { + if (allAliases.add(dataStreamAlias) == false) { + duplicates.add("data stream alias and indices alias have the same name (" + dataStreamAlias + ")"); + } + } } final Set aliasDuplicatesWithIndices = new HashSet<>(allAliases); aliasDuplicatesWithIndices.retainAll(allIndices); - ArrayList duplicates = new ArrayList<>(); if (aliasDuplicatesWithIndices.isEmpty() == false) { // iterate again and constructs a helpful message for (ObjectCursor cursor : indices.values()) { @@ -1613,12 +1619,19 @@ public Metadata build(boolean builtIndicesLookupEagerly) { aliasDuplicatesWithDataStreams.retainAll(allDataStreams); if (aliasDuplicatesWithDataStreams.isEmpty() == false) { // iterate again and constructs a helpful message - for (ObjectCursor cursor : indices.values()) { - for (String alias : aliasDuplicatesWithDataStreams) { + for (String alias : aliasDuplicatesWithDataStreams) { + // reported var avoids adding a message twice if an index alias has the same name as a data stream. + boolean reported = false; + for (ObjectCursor cursor : indices.values()) { if (cursor.value.getAliases().containsKey(alias)) { duplicates.add(alias + " (alias of " + cursor.value.getIndex() + ") conflicts with data stream"); + reported = true; } } + // This is for adding an error message for when a data stream alias has the same name as a data stream.
+ if (reported == false && dataStreamMetadata != null && dataStreamMetadata.dataStreams().containsKey(alias)) { + duplicates.add("data stream alias and data stream have the same name (" + alias + ")"); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 7061d9556b82c..fde75d6f608c1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -1215,6 +1215,71 @@ public void testBuildIndicesLookupForDataStreamAliases() { assertThat(value.getAliases(), nullValue()); } + public void testDataStreamAliasValidation() { + Metadata.Builder b = Metadata.builder(); + addDataStream("my-alias", b); + b.put("my-alias", "my-alias", null, null); + var e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + + b = Metadata.builder(); + addDataStream("d1", b); + addDataStream("my-alias", b); + b.put("my-alias", "d1", null, null); + e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + + b = Metadata.builder(); + b.put( + IndexMetadata.builder("index1") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .putAlias(new AliasMetadata.Builder("my-alias")) + ); + + addDataStream("d1", b); + b.put("my-alias", "d1", null, null); + e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)")); + } + + public void testDataStreamAliasValidationRestoreScenario() { + Metadata.Builder b = Metadata.builder(); + b.dataStreams( + Map.of("my-alias", createDataStream("my-alias")), + Map.of("my-alias", new DataStreamAlias("my-alias", List.of("my-alias"), null, null)) + ); + var e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + + b = Metadata.builder(); + b.dataStreams( + Map.of("d1", createDataStream("d1"), "my-alias", createDataStream("my-alias")), + Map.of("my-alias", new DataStreamAlias("my-alias", List.of("d1"), null, null)) + ); + e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + + b = Metadata.builder(); + b.put( + IndexMetadata.builder("index1") + .settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .putAlias(new AliasMetadata.Builder("my-alias")) + ); + b.dataStreams(Map.of("d1", createDataStream("d1")), Map.of("my-alias", new DataStreamAlias("my-alias", List.of("d1"), null, null))); + e = expectThrows(IllegalStateException.class, b::build); + assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)")); + } + private void addDataStream(String name, Metadata.Builder b) { int numBackingIndices = randomIntBetween(1, 4); List indices = new 
ArrayList<>(numBackingIndices); @@ -1226,6 +1291,16 @@ private void addDataStream(String name, Metadata.Builder b) { b.put(new DataStream(name, createTimestampField("@timestamp"), indices)); } + private DataStream createDataStream(String name) { + int numBackingIndices = randomIntBetween(1, 4); + List indices = new ArrayList<>(numBackingIndices); + for (int j = 1; j <= numBackingIndices; j++) { + IndexMetadata idx = createBackingIndex(name, j).build(); + indices.add(idx.getIndex()); + } + return new DataStream(name, createTimestampField("@timestamp"), indices); + } + public void testIndicesLookupRecordsDataStreamForBackingIndices() { final int numIndices = randomIntBetween(2, 5); final int numBackingIndices = randomIntBetween(2, 5); diff --git a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index be073aa840d4d..a820b4d382d28 100644 --- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -14,10 +14,12 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; @@ -58,6 +60,7 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.indices.InvalidAliasNameException; +import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; @@ -1531,6 +1534,174 @@ public void testSegmentsSortedOnTimestampDesc() throws Exception { assertTrue(timestamp2 > timestamp3); } + public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { + CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("my-alias")); + assertAcked(client().admin().indices().create(createIndexRequest).actionGet()); + + // Important detail: create template with data stream template after the index has been created + DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); + + var request = new CreateDataStreamAction.Request("my-alias"); + var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), containsString("[my-alias (alias of [")); + assertThat(e.getMessage(), containsString("]) conflicts with data stream")); + } + + public void testCreateDataStreamWithSameNameAsIndex() throws Exception { + CreateIndexRequest createIndexRequest = new 
CreateIndexRequest("my-index").alias(new Alias("my-alias")); + assertAcked(client().admin().indices().create(createIndexRequest).actionGet()); + + // Important detail: create template with data stream template after the index has been created + DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); + + var request = new CreateDataStreamAction.Request("my-index"); + var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), containsString("data stream [my-index] conflicts with index")); + } + + public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception { + { + DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); + var request = new CreateDataStreamAction.Request("my-ds"); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var aliasesAddRequest = new IndicesAliasesRequest(); + aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("my-ds").aliases("my-alias")); + assertAcked(client().admin().indices().aliases(aliasesAddRequest).actionGet()); + + var request2 = new CreateDataStreamAction.Request("my-alias"); + var e = expectThrows( + IllegalStateException.class, + () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() + ); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + } + { + assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet()); + DataStreamIT.putComposableIndexTemplate( + "my-template", + null, + List.of("my-*"), + null, + null, + Map.of("my-alias", AliasMetadata.builder("my-alias").build()) + ); + var request = new CreateDataStreamAction.Request("my-ds"); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + + var request2 = new CreateDataStreamAction.Request("my-alias"); + var e = expectThrows( + IllegalStateException.class, + () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() + ); + assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); + } + } + + public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception { + { + DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*")); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("es-logs").alias(new Alias("logs")); + assertAcked(client().admin().indices().create(createIndexRequest).actionGet()); + + var request = new CreateDataStreamAction.Request("logs-es"); + assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); + aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); + var e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(aliasesAddRequest).actionGet()); + assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); + } + { + assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet()); + DataStreamIT.putComposableIndexTemplate( + "my-template", + null, + List.of("logs-*"), + null, + null, + Map.of("logs", AliasMetadata.builder("logs").build()) + ); + + var request = new CreateDataStreamAction.Request("logs-es"); + 
+            var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
+            assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)"));
+        }
+    }
+
+    public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception {
+        DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*"));
+
+        CreateIndexRequest createIndexRequest = new CreateIndexRequest("logs");
+        assertAcked(client().admin().indices().create(createIndexRequest).actionGet());
+
+        {
+            var request = new CreateDataStreamAction.Request("logs-es");
+            assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
+            IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest();
+            aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs"));
+            var e = expectThrows(InvalidAliasNameException.class, () -> client().admin().indices().aliases(aliasesAddRequest).actionGet());
+            assertThat(
+                e.getMessage(),
+                equalTo("Invalid alias name [logs]: an index or data stream exists with the same name as the alias")
+            );
+        }
+        {
+            assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet());
+            var e = expectThrows(
+                IllegalArgumentException.class,
+                () -> DataStreamIT.putComposableIndexTemplate(
+                    "my-template",
+                    null,
+                    List.of("logs-*"),
+                    null,
+                    null,
+                    Map.of("logs", AliasMetadata.builder("logs").build())
+                )
+            );
+            assertThat(
+                e.getCause().getMessage(),
+                equalTo("Invalid alias name [logs]: an index or data stream exists with the same name as the alias")
+            );
+        }
+    }
+
+    public void testCreateIndexWithSameNameAsDataStreamAlias() throws Exception {
+        DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*"));
+
+        var request = new CreateDataStreamAction.Request("logs-es");
+        assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
+        IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest();
+        aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs"));
+        assertAcked(client().admin().indices().aliases(aliasesAddRequest).actionGet());
+
+        CreateIndexRequest createIndexRequest = new CreateIndexRequest("logs");
+        var e = expectThrows(InvalidIndexNameException.class, () -> client().admin().indices().create(createIndexRequest).actionGet());
+        assertThat(e.getMessage(), equalTo("Invalid index name [logs], already exists as alias"));
+    }
+
+    public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception {
+        DataStreamIT.putComposableIndexTemplate("my-template", List.of("logs-*"));
+
+        var request = new CreateDataStreamAction.Request("logs-es");
+        assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet());
+        IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest();
+        aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs"));
+        assertAcked(client().admin().indices().aliases(aliasesAddRequest).actionGet());
+
+        {
+            CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("logs"));
+            var e = expectThrows(IllegalStateException.class, () -> client().admin().indices().create(createIndexRequest).actionGet());
+            assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)"));
+        }
+        {
+            CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index");
+            assertAcked(client().admin().indices().create(createIndexRequest).actionGet());
+            IndicesAliasesRequest addAliasRequest = new IndicesAliasesRequest();
+            addAliasRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("my-index").aliases("logs"));
+            var e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(addAliasRequest).actionGet());
+            assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)"));
+        }
+    }
+
     private static void verifyResolvability(String dataStream, ActionRequestBuilder<?, ?> requestBuilder, boolean fail) {
         verifyResolvability(dataStream, requestBuilder, fail, 0);
     }
@@ -1723,12 +1894,23 @@ static void putComposableIndexTemplate(
         List<String> patterns,
         @Nullable Settings settings,
         @Nullable Map<String, Object> metadata
+    ) throws IOException {
+        putComposableIndexTemplate(id, mappings, patterns, settings, metadata, null);
+    }
+
+    static void putComposableIndexTemplate(
+        String id,
+        @Nullable String mappings,
+        List<String> patterns,
+        @Nullable Settings settings,
+        @Nullable Map<String, Object> metadata,
+        @Nullable Map<String, AliasMetadata> aliases
     ) throws IOException {
         PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id);
         request.indexTemplate(
             new ComposableIndexTemplate(
                 patterns,
-                new Template(settings, mappings == null ? null : new CompressedXContent(mappings), null),
+                new Template(settings, mappings == null ? null : new CompressedXContent(mappings), aliases),
                 null,
                 null,
                 null,
diff --git a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
index 683ef7310a016..38f69173b8686 100644
--- a/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
+++ b/x-pack/plugin/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java
@@ -14,11 +14,13 @@
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
 import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
 import org.elasticsearch.action.index.IndexResponse;
@@ -1075,4 +1077,33 @@ public void testRestoreSnapshotFully() throws Exception {
         assertThat(client.execute(GetDataStreamAction.INSTANCE, getRequest).get().getDataStreams(), hasSize(2));
         assertNotNull(client.admin().indices().prepareGetIndex().setIndices(indexName).get());
     }
+
+    public void testRestoreDataStreamAliasWithConflictingDataStream() throws Exception {
+        var snapshotName = "test-snapshot";
+        createFullSnapshot(REPO, snapshotName);
+        client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet();
+        DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*"));
+        var request = new CreateDataStreamAction.Request("my-alias");
+        assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet());
+
+        var e = expectThrows(
+            IllegalStateException.class,
+            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get()
+        );
+        assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)"));
+    }
+
+    public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Exception {
+        var snapshotName = "test-snapshot";
+        createFullSnapshot(REPO, snapshotName);
+        client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request("*")).actionGet();
+        CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("my-alias"));
+        assertAcked(client.admin().indices().create(createIndexRequest).actionGet());
+
+        var e = expectThrows(
+            IllegalStateException.class,
+            () -> client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get()
+        );
+        assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)"));
+    }
 }

From c2b62847aa30e20ed436b8ca3698e43d307a1f97 Mon Sep 17 00:00:00 2001
From: Rene Groeschke
Date: Tue, 30 Nov 2021 09:37:20 +0100
Subject: [PATCH 88/88] Fix stopping of old elasticsearch cluster (#81059)

Due to not exposing the PID of the underlying cluster, the Fixture Stop
task was skipped, leaving running clusters behind after the build
finished.
---
 .../src/main/java/oldes/OldElasticsearch.java | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java b/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java
index ee70e0c55c259..1ec14f6c2939c 100644
--- a/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java
+++ b/test/fixtures/old-elasticsearch/src/main/java/oldes/OldElasticsearch.java
@@ -132,5 +132,9 @@ public static void main(String[] args) throws IOException {
         Path tmp = Files.createTempFile(baseDir, null, null);
         Files.write(tmp, Integer.toString(port).getBytes(StandardCharsets.UTF_8));
         Files.move(tmp, baseDir.resolve("ports"), StandardCopyOption.ATOMIC_MOVE);
+
+        tmp = Files.createTempFile(baseDir, null, null);
+        Files.write(tmp, Integer.toString(pid).getBytes(StandardCharsets.UTF_8));
+        Files.move(tmp, baseDir.resolve("pid"), StandardCopyOption.ATOMIC_MOVE);
     }
 }
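
For readers following the last patch: the Fixture Stop task can only kill the old cluster if it can learn the process ID, which is exactly what the new "pid" file provides. Below is a minimal sketch, under stated assumptions, of how a build-side stop task might consume that file. The FixtureStopper class, its stop method, and the idea that the caller passes the same baseDir are hypothetical illustrations, not part of either patch.

// Hypothetical sketch, not from the patches: a stop task that reads the
// "pid" file published by OldElasticsearch and terminates that process.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;

public class FixtureStopper {
    // Reads the published PID and requests termination of that process.
    // Returns true if the process was still alive and a stop was requested.
    public static boolean stop(Path baseDir) throws IOException {
        Path pidFile = baseDir.resolve("pid"); // assumed location, mirroring the patch
        if (Files.exists(pidFile) == false) {
            return false; // fixture never started, or was already cleaned up
        }
        String contents = new String(Files.readAllBytes(pidFile), StandardCharsets.UTF_8);
        long pid = Long.parseLong(contents.trim());
        Optional<ProcessHandle> handle = ProcessHandle.of(pid);
        handle.ifPresent(ProcessHandle::destroy); // polite termination request
        return handle.isPresent();
    }
}

Note how the patch publishes the file: it writes to a temp file and then uses StandardCopyOption.ATOMIC_MOVE, the same pattern already used for the "ports" file, so a reader like the sketch above never observes a half-written PID.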