From 7c130d235a4e516bf9e7feee496ad0a334e64074 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 14:55:45 -0800 Subject: [PATCH 01/57] Mute CcrRepositoryIT#testFollowerMappingIsUpdated Tracked in #37887. --- .../test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 0a3669734dc6b..ee02241978cfe 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -308,6 +308,7 @@ public void testRateLimitingIsEmployed() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37887") public void testFollowerMappingIsUpdated() throws IOException { String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; String leaderIndex = "index1"; From a35701e43736aaac4af782cab677189a9abc13ff Mon Sep 17 00:00:00 2001 From: Sivagurunathan Velayutham Date: Fri, 25 Jan 2019 16:57:50 -0600 Subject: [PATCH 02/57] Fix potential IllegalCapacityException in LLRC when selecting nodes (#37821) --- .../main/java/org/elasticsearch/client/RestClient.java | 2 +- .../java/org/elasticsearch/client/RestClientTests.java | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index 3b1946ef9ed58..d053bda7d44fa 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -398,7 +398,7 @@ static Iterable selectNodes(NodeTuple> nodeTuple, Map livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size()); + List livingNodes = new ArrayList<>(Math.max(0, nodeTuple.nodes.size() - blacklist.size())); List deadNodes = new ArrayList<>(blacklist.size()); for (Node node : nodeTuple.nodes) { DeadHostState deadness = blacklist.get(node.getHost()); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 69cdfeae85dff..f3f0f0e58b98d 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -272,6 +272,15 @@ public String toString() { blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier))); blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier)))); + /* + * case when fewer nodeTuple than blacklist, wont result in any IllegalCapacityException + */ + { + NodeTuple> fewerNodeTuple = new NodeTuple<>(Arrays.asList(n1, n2), null); + assertSelectLivingHosts(Arrays.asList(n1), fewerNodeTuple, blacklist, NodeSelector.ANY); + assertSelectLivingHosts(Arrays.asList(n2), fewerNodeTuple, blacklist, not1); + } + /* * selectHosts will revive a single host if regardless of * blacklist time. It'll revive the node that is closest From 827ed1214634161742ec08c7b4dd1968dcca6b74 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 15:27:56 -0800 Subject: [PATCH 03/57] Mute TasksIT#testTransportBulkTasks Tracked in #37893. 
--- .../elasticsearch/action/admin/cluster/node/tasks/TasksIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index a2147544a87db..3e52999cbf382 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -291,7 +291,7 @@ public void testTransportBroadcastReplicationTasks() { } } - + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37893") public void testTransportBulkTasks() { registerTaskManageListeners(BulkAction.NAME); // main task registerTaskManageListeners(BulkAction.NAME + "[s]"); // shard task From 24ff23812e730730408c72d44d9f7ea4928d3c91 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 14:38:00 -0800 Subject: [PATCH 04/57] Mute TestClustersPluginIT and BuildExamplePluginsIT Tracked in #37889. --- .../java/org/elasticsearch/gradle/BuildExamplePluginsIT.java | 2 ++ .../elasticsearch/gradle/testclusters/TestClustersPluginIT.java | 2 ++ 2 files changed, 4 insertions(+) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 239e6d37c81e3..39d6e433ac36e 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -38,6 +39,7 @@ import java.util.Objects; import java.util.stream.Collectors; +@Ignore // Awaiting a fix in https://github.com/elastic/elasticsearch/issues/37889. public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static final List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 9b7c5686e8102..bb69665026b1d 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,9 +21,11 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Ignore; import java.util.Arrays; +@Ignore // Awaiting a fix in https://github.com/elastic/elasticsearch/issues/37889. public class TestClustersPluginIT extends GradleIntegrationTestCase { public void testListClusters() { From e41ccdc1a01278c0930500ec27b07353e1d50c81 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 16:15:14 -0800 Subject: [PATCH 05/57] Mute GeoWKTShapeParserTests#testParseGeometryCollection Tracked in #37894. 
--- .../org/elasticsearch/common/geo/GeoWKTShapeParserTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index f1d9b0f161570..6518e05cf330c 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -429,6 +429,7 @@ public void testInvalidGeometryType() throws IOException { assertValidException(builder, IllegalArgumentException.class); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37894") @Override public void testParseGeometryCollection() throws IOException { if (rarely()) { From 23b0d9b3ed7f212e0a7c80c9eb08d215b4840784 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 16:50:39 -0800 Subject: [PATCH 06/57] Mute RecoveryWhileUnderLoadIT#testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest Tracked in #37895. --- .../org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index c0345be6fae01..d23239509f3b4 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -115,6 +115,7 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37895") public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() throws Exception { logger.info("--> creating test index ..."); int numberOfShards = numberOfShards(); From 58301ead6d23fe943a8ea02376ff5ef99768f42b Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 17:12:14 -0800 Subject: [PATCH 07/57] Mute IndexShardIT#testMaybeFlush Tracked in #37896. --- .../test/java/org/elasticsearch/index/shard/IndexShardIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 1bcb8cd29104f..58c4844a11bfc 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -342,6 +342,7 @@ public void testIndexCanChangeCustomDataPath() throws Exception { assertPathHasBeenCleared(endDir.toAbsolutePath()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37896") public void testMaybeFlush() throws Exception { createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST) .build()); From eb973a47441313e2699857fceba7a871fac7384f Mon Sep 17 00:00:00 2001 From: Tal Levy Date: Fri, 25 Jan 2019 17:29:04 -0800 Subject: [PATCH 08/57] fix GeoHashGridTests precision parsing error Previously, a hardcoded precision value of 4 was used by these tests resulting in no approximation errors. Now that the precision is between 1-12, precision values of 1 and 2 result in potential bucketing errors. This commit adjusts the range to be 4-12. Fixes #37892. 
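For readers unfamiliar with geohash cell sizes, the standalone sketch below (an
illustration, not part of this patch) uses the standard base-32 geohash encoding
to show why the old lower bound was fragile: a precision-1 cell spans 45 degrees
in each axis, so Berlin and Stockholm, roughly 810 km apart, collide in the same
bucket and only separate from precision 2 upwards.

    import java.util.Locale;

    public final class GeohashPrecisionDemo {

        private static final String BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz";

        // Standard geohash encoding: alternately halve the longitude and latitude
        // ranges, emitting one base-32 character per 5 bits.
        static String encode(double lat, double lon, int precision) {
            double latMin = -90.0, latMax = 90.0, lonMin = -180.0, lonMax = 180.0;
            StringBuilder hash = new StringBuilder(precision);
            boolean lonBit = true; // a geohash starts with a longitude bit
            int bit = 0, ch = 0;
            while (hash.length() < precision) {
                if (lonBit) {
                    double mid = (lonMin + lonMax) / 2;
                    if (lon >= mid) { ch = (ch << 1) | 1; lonMin = mid; } else { ch <<= 1; lonMax = mid; }
                } else {
                    double mid = (latMin + latMax) / 2;
                    if (lat >= mid) { ch = (ch << 1) | 1; latMin = mid; } else { ch <<= 1; latMax = mid; }
                }
                lonBit = !lonBit;
                if (++bit == 5) {
                    hash.append(BASE32.charAt(ch));
                    bit = 0;
                    ch = 0;
                }
            }
            return hash.toString();
        }

        public static void main(String[] args) {
            // Both cities print "u" at precision 1 (one bucket for ~810 km),
            // "u3" vs "u6" at precision 2, and fully distinct hashes beyond that.
            for (int precision : new int[] {1, 2, 4, 12}) {
                System.out.println(String.format(Locale.ROOT, "precision %2d: berlin=%s stockholm=%s",
                    precision, encode(52.520, 13.405, precision), encode(59.329, 18.069, precision)));
            }
        }
    }
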
--- .../search/aggregations/bucket/geogrid/GeoHashGridTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c48308e6e1724..9d4079646bab9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -51,6 +51,6 @@ protected long longEncode(double lng, double lat, int precision) { @Override protected int randomPrecision() { - return randomIntBetween(1, 12); + return randomIntBetween(4, 12); } } From afc60bb0e51aff57b8916401d693add63d56d0b8 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Fri, 25 Jan 2019 18:09:34 -0800 Subject: [PATCH 09/57] Mute DynamicMappingIT#testConflictingDynamicMappings Tracked in #37898. --- .../java/org/elasticsearch/index/mapper/DynamicMappingIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 5655d741a9dd2..5ec63681fe690 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -41,6 +41,7 @@ protected Collection> nodePlugins() { return Collections.singleton(InternalSettingsPlugin.class); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37898") public void testConflictingDynamicMappings() { // we don't use indexRandom because the order of requests is important here createIndex("index"); From f2c0c26d15335c92c47e52e9e06ea8b84c23e1cb Mon Sep 17 00:00:00 2001 From: David Roberts Date: Sat, 26 Jan 2019 20:19:57 +0000 Subject: [PATCH 10/57] [ML] Adjust structure finder for Joda to Java time migration (#37306) The ML file structure finder has always reported both Joda and Java time format strings. This change makes the Java time format strings the ones that are incorporated into mappings and ingest pipeline definitions. The BWC syntax of prepending "8" to these formats is used. This will need to be removed once Java time format strings become the default in Elasticsearch. This commit also removes direct imports of Joda classes in the structure finder unit tests. Instead the core Joda BWC class is used. --- .../ml/apis/find-file-structure.asciidoc | 64 ++++++++++--------- .../DelimitedFileStructureFinder.java | 2 +- .../FileStructureUtils.java | 17 ++++- .../NdJsonFileStructureFinder.java | 2 +- .../TextLogFileStructureFinder.java | 2 +- .../TimestampFormatFinder.java | 7 +- .../XmlFileStructureFinder.java | 2 +- .../FileStructureUtilsTests.java | 35 +++++----- .../TextLogFileStructureFinderTests.java | 2 +- .../TimestampFormatFinderTests.java | 45 ++++++++----- 10 files changed, 107 insertions(+), 71 deletions(-) diff --git a/docs/reference/ml/apis/find-file-structure.asciidoc b/docs/reference/ml/apis/find-file-structure.asciidoc index ddc72b78d8e86..9650efff16189 100644 --- a/docs/reference/ml/apis/find-file-structure.asciidoc +++ b/docs/reference/ml/apis/find-file-structure.asciidoc @@ -164,37 +164,40 @@ format corresponds to the primary timestamp, but you do not want to specify the full `grok_pattern`. 
If this parameter is not specified, the structure finder chooses the best format from -the formats it knows, which are these Joda formats and their Java time equivalents: - -* `dd/MMM/YYYY:HH:mm:ss Z` -* `EEE MMM dd HH:mm zzz YYYY` -* `EEE MMM dd HH:mm:ss YYYY` -* `EEE MMM dd HH:mm:ss zzz YYYY` -* `EEE MMM dd YYYY HH:mm zzz` -* `EEE MMM dd YYYY HH:mm:ss zzz` -* `EEE, dd MMM YYYY HH:mm Z` -* `EEE, dd MMM YYYY HH:mm ZZ` -* `EEE, dd MMM YYYY HH:mm:ss Z` -* `EEE, dd MMM YYYY HH:mm:ss ZZ` +the formats it knows, which are these Java time formats and their Joda equivalents: + +* `dd/MMM/yyyy:HH:mm:ss XX` +* `EEE MMM dd HH:mm zzz yyyy` +* `EEE MMM dd HH:mm:ss yyyy` +* `EEE MMM dd HH:mm:ss zzz yyyy` +* `EEE MMM dd yyyy HH:mm zzz` +* `EEE MMM dd yyyy HH:mm:ss zzz` +* `EEE, dd MMM yyyy HH:mm XX` +* `EEE, dd MMM yyyy HH:mm XXX` +* `EEE, dd MMM yyyy HH:mm:ss XX` +* `EEE, dd MMM yyyy HH:mm:ss XXX` * `ISO8601` * `MMM d HH:mm:ss` * `MMM d HH:mm:ss,SSS` -* `MMM d YYYY HH:mm:ss` +* `MMM d yyyy HH:mm:ss` * `MMM dd HH:mm:ss` * `MMM dd HH:mm:ss,SSS` -* `MMM dd YYYY HH:mm:ss` -* `MMM dd, YYYY h:mm:ss a` +* `MMM dd yyyy HH:mm:ss` +* `MMM dd, yyyy h:mm:ss a` * `TAI64N` * `UNIX` * `UNIX_MS` -* `YYYY-MM-dd HH:mm:ss` -* `YYYY-MM-dd HH:mm:ss,SSS` -* `YYYY-MM-dd HH:mm:ss,SSS Z` -* `YYYY-MM-dd HH:mm:ss,SSSZ` -* `YYYY-MM-dd HH:mm:ss,SSSZZ` -* `YYYY-MM-dd HH:mm:ssZ` -* `YYYY-MM-dd HH:mm:ssZZ` -* `YYYYMMddHHmmss` +* `yyyy-MM-dd HH:mm:ss` +* `yyyy-MM-dd HH:mm:ss,SSS` +* `yyyy-MM-dd HH:mm:ss,SSS XX` +* `yyyy-MM-dd HH:mm:ss,SSSXX` +* `yyyy-MM-dd HH:mm:ss,SSSXXX` +* `yyyy-MM-dd HH:mm:ssXX` +* `yyyy-MM-dd HH:mm:ssXXX` +* `yyyy-MM-dd'T'HH:mm:ss,SSS` +* `yyyy-MM-dd'T'HH:mm:ss,SSSXX` +* `yyyy-MM-dd'T'HH:mm:ss,SSSXXX` +* `yyyyMMddHHmmss` -- @@ -603,11 +606,11 @@ If the request does not encounter errors, you receive the following result: }, "tpep_dropoff_datetime" : { "type" : "date", - "format" : "YYYY-MM-dd HH:mm:ss" + "format" : "8yyyy-MM-dd HH:mm:ss" }, "tpep_pickup_datetime" : { "type" : "date", - "format" : "YYYY-MM-dd HH:mm:ss" + "format" : "8yyyy-MM-dd HH:mm:ss" }, "trip_distance" : { "type" : "double" @@ -621,7 +624,7 @@ If the request does not encounter errors, you receive the following result: "field" : "tpep_pickup_datetime", "timezone" : "{{ beat.timezone }}", "formats" : [ - "YYYY-MM-dd HH:mm:ss" + "8yyyy-MM-dd HH:mm:ss" ] } } @@ -1287,10 +1290,9 @@ If the request does not encounter errors, you receive the following result: was chosen because it comes first in the column order. If you prefer `tpep_dropoff_datetime` then force it to be chosen using the `timestamp_field` query parameter. -<8> `joda_timestamp_formats` are used to tell Logstash and Ingest pipeline how - to parse timestamps. +<8> `joda_timestamp_formats` are used to tell Logstash how to parse timestamps. <9> `java_timestamp_formats` are the Java time formats recognized in the time - fields. In future Ingest pipeline will switch to use this format. + fields. Elasticsearch mappings and Ingest pipeline use this format. <10> The timestamp format in this sample doesn't specify a timezone, so to accurately convert them to UTC timestamps to store in Elasticsearch it's necessary to supply the timezone they relate to. 
`need_client_timezone` @@ -1396,7 +1398,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "ISO8601" + "8yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, @@ -1556,7 +1558,7 @@ this: "field" : "timestamp", "timezone" : "{{ beat.timezone }}", "formats" : [ - "ISO8601" + "8yyyy-MM-dd'T'HH:mm:ss,SSS" ] } }, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index 93d440b79d434..dd30c0a1f94bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -149,7 +149,7 @@ static DelimitedFileStructureFinder makeDelimitedFileStructureFinder(List makeIngestPipelineDefinition(String grokPatter if (needClientTimezone) { dateProcessorSettings.put("timezone", "{{ " + BEAT_TIMEZONE_FIELD + " }}"); } - dateProcessorSettings.put("formats", timestampFormats); + dateProcessorSettings.put("formats", jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats)); processors.add(Collections.singletonMap("date", dateProcessorSettings)); } @@ -365,4 +365,19 @@ public static Map makeIngestPipelineDefinition(String grokPatter pipeline.put(Pipeline.PROCESSORS_KEY, processors); return pipeline; } + + // TODO: remove this method when Java time formats are the default + static List jodaBwcJavaTimestampFormatsForIngestPipeline(List javaTimestampFormats) { + return javaTimestampFormats.stream().map(format -> { + switch (format) { + case "ISO8601": + case "UNIX_MS": + case "UNIX": + case "TAI64N": + return format; + default: + return "8" + format; + } + }).collect(Collectors.toList()); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java index d7ba426d6a391..33d9ba56b3f53 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/NdJsonFileStructureFinder.java @@ -63,7 +63,7 @@ static NdJsonFileStructureFinder makeNdJsonFileStructureFinder(List expl .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) .setNeedClientTimezone(needClientTimeZone) .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, timeField.v1(), - timeField.v2().jodaTimestampFormats, needClientTimeZone)); + timeField.v2().javaTimestampFormats, needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java index c61a48beb116f..b476e3e465463 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinder.java @@ -123,7 +123,7 @@ static TextLogFileStructureFinder makeTextLogFileStructureFinder(List ex .setNeedClientTimezone(needClientTimeZone) .setGrokPattern(grokPattern) 
.setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(grokPattern, interimTimestampField, - bestTimestamp.v1().jodaTimestampFormats, needClientTimeZone)) + bestTimestamp.v1().javaTimestampFormats, needClientTimeZone)) .setMappings(mappings) .setFieldStats(fieldStats) .setExplanation(explanation) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java index 392e7b4e0be5e..07dba7dcb2c64 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinder.java @@ -457,13 +457,13 @@ public boolean hasTimezoneDependentParsing() { * and possibly also a "format" setting. */ public Map getEsDateMappingTypeWithFormat() { - if (jodaTimestampFormats.contains("TAI64N")) { + if (javaTimestampFormats.contains("TAI64N")) { // There's no format for TAI64N in the timestamp formats used in mappings return Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"); } Map mapping = new LinkedHashMap<>(); mapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - String formats = jodaTimestampFormats.stream().flatMap(format -> { + String formats = javaTimestampFormats.stream().flatMap(format -> { switch (format) { case "ISO8601": return Stream.empty(); @@ -472,7 +472,8 @@ public Map getEsDateMappingTypeWithFormat() { case "UNIX": return Stream.of("epoch_second"); default: - return Stream.of(format); + // TODO: remove the "8" prefix when Java time formats are the default + return Stream.of("8" + format); } }).collect(Collectors.joining("||")); if (formats.isEmpty() == false) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java index b9a805a14feeb..53550ebf18dd3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/XmlFileStructureFinder.java @@ -101,7 +101,7 @@ static XmlFileStructureFinder makeXmlFileStructureFinder(List explanatio .setJavaTimestampFormats(timeField.v2().javaTimestampFormats) .setNeedClientTimezone(needClientTimeZone) .setIngestPipeline(FileStructureUtils.makeIngestPipelineDefinition(null, topLevelTag + "." 
+ timeField.v1(), - timeField.v2().jodaTimestampFormats, needClientTimeZone)); + timeField.v2().javaTimestampFormats, needClientTimeZone)); } Tuple, SortedMap> mappingsAndFieldStats = diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java index 389a65da749a5..8140d2fa6034f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/FileStructureUtilsTests.java @@ -39,7 +39,7 @@ public void testGuessTimestampGivenSingleSampleSingleField() { EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("ISO8601")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -52,7 +52,7 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeField overrides, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("ISO8601")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -77,20 +77,20 @@ public void testGuessTimestampGivenSingleSampleSingleFieldAndConsistentTimeForma overrides, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("ISO8601")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } public void testGuessTimestampGivenSingleSampleSingleFieldAndImpossibleTimeFormatOverride() { - FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("EEE MMM dd HH:mm:ss YYYY").build(); + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("EEE MMM dd HH:mm:ss yyyy").build(); Map sample = Collections.singletonMap("field1", "2018-05-24T17:28:31,735"); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> FileStructureUtils.guessTimestampField(explanation, Collections.singletonList(sample), overrides, NOOP_TIMEOUT_CHECKER)); - assertEquals("Specified timestamp format [EEE MMM dd HH:mm:ss YYYY] does not match for record [{field1=2018-05-24T17:28:31,735}]", + assertEquals("Specified timestamp format [EEE MMM dd HH:mm:ss yyyy] does not match for record [{field1=2018-05-24T17:28:31,735}]", e.getMessage()); } @@ -101,7 +101,7 @@ public void testGuessTimestampGivenSamplesWithSameSingleTimeField() { EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("field1", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("ISO8601")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd'T'HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -130,7 +130,7 @@ public void testGuessTimestampGivenSingleSampleManyFieldsOneTimeFormat() { EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); + assertThat(match.v2().javaTimestampFormats, 
contains("yyyy-MM-dd HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -147,7 +147,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormat() { EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -178,7 +178,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("YYYY-MM-dd HH:mm:ss,SSS")); + assertThat(match.v2().javaTimestampFormats, contains("yyyy-MM-dd HH:mm:ss,SSS")); assertEquals("TIMESTAMP_ISO8601", match.v2().grokPatternName); } @@ -195,7 +195,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsSameSingleTimeFormatDist EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss")); + assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); } @@ -228,7 +228,7 @@ public void testGuessTimestampGivenSamplesWithManyFieldsInconsistentAndConsisten EMPTY_OVERRIDES, NOOP_TIMEOUT_CHECKER); assertNotNull(match); assertEquals("time2", match.v1()); - assertThat(match.v2().jodaTimestampFormats, contains("MMM dd YYYY HH:mm:ss", "MMM d YYYY HH:mm:ss")); + assertThat(match.v2().javaTimestampFormats, contains("MMM dd yyyy HH:mm:ss", "MMM d yyyy HH:mm:ss")); assertEquals("CISCOTIMESTAMP", match.v2().grokPatternName); } @@ -331,7 +331,8 @@ public void testGuessMappingsAndCalculateFieldStats() { assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "keyword"), mappings.get("foo")); Map expectedTimeMapping = new HashMap<>(); expectedTimeMapping.put(FileStructureUtils.MAPPING_TYPE_SETTING, "date"); - expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "YYYY-MM-dd HH:mm:ss,SSS"); + // TODO: remove the "8" prefix when Java time formats are the default + expectedTimeMapping.put(FileStructureUtils.MAPPING_FORMAT_SETTING, "8" + "yyyy-MM-dd HH:mm:ss,SSS"); assertEquals(expectedTimeMapping, mappings.get("time")); assertEquals(Collections.singletonMap(FileStructureUtils.MAPPING_TYPE_SETTING, "long"), mappings.get("bar")); assertNull(mappings.get("nothing")); @@ -354,7 +355,7 @@ public void testMakeIngestPipelineDefinitionGivenStructuredWithoutTimestamp() { public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).jodaTimestampFormats; + List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; boolean needClientTimezone = randomBoolean(); Map pipeline = @@ -371,7 +372,8 @@ public void testMakeIngestPipelineDefinitionGivenStructuredWithTimestamp() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - assertEquals(timestampFormats, dateProcessor.get("formats")); + // TODO: remove the call to 
jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default + assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); // After removing the two expected fields there should be nothing left in the pipeline assertEquals(Collections.emptyMap(), pipeline); @@ -382,7 +384,7 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { String grokPattern = randomAlphaOfLength(100); String timestampField = randomAlphaOfLength(10); - List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).jodaTimestampFormats; + List timestampFormats = randomFrom(TimestampFormatFinder.ORDERED_CANDIDATE_FORMATS).javaTimestampFormats; boolean needClientTimezone = randomBoolean(); Map pipeline = @@ -404,7 +406,8 @@ public void testMakeIngestPipelineDefinitionGivenSemiStructured() { assertNotNull(dateProcessor); assertEquals(timestampField, dateProcessor.get("field")); assertEquals(needClientTimezone, dateProcessor.containsKey("timezone")); - assertEquals(timestampFormats, dateProcessor.get("formats")); + // TODO: remove the call to jodaBwcJavaTimestampFormatsForIngestPipeline() when Java time formats are the default + assertEquals(FileStructureUtils.jodaBwcJavaTimestampFormatsForIngestPipeline(timestampFormats), dateProcessor.get("formats")); Map removeProcessor = (Map) processors.get(2).get("remove"); assertNotNull(removeProcessor); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java index de4244cd620a5..7ed5518c65077 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TextLogFileStructureFinderTests.java @@ -357,7 +357,7 @@ public void testMostLikelyTimestampGivenExceptionTrace() { public void testMostLikelyTimestampGivenExceptionTraceAndTimestampFormatOverride() { - FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("YYYY-MM-dd HH:mm:ss").build(); + FileStructureOverrides overrides = FileStructureOverrides.builder().setTimestampFormat("yyyy-MM-dd HH:mm:ss").build(); Tuple> mostLikelyMatch = TextLogFileStructureFinder.mostLikelyTimestamp(EXCEPTION_TRACE_SAMPLE.split("\n"), overrides, NOOP_TIMEOUT_CHECKER); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java index f6f75fe722dac..0374ed6f34175 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -6,9 +6,17 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeFormatterBuilder; +import java.time.temporal.ChronoField; +import 
java.time.temporal.TemporalAccessor; +import java.time.temporal.TemporalQueries; import java.util.Arrays; import java.util.List; import java.util.Locale; @@ -269,32 +277,39 @@ private void validateTimestampMatch(TimestampMatch expected, String text, long e assertTrue(expected.simplePattern.matcher(text).find()); } + // This is because parsing timestamps using Joda formats generates warnings. + // Eventually we'll probably just remove the checks that the Joda formats + // are valid, and at that point this method can be removed too. + protected boolean enableWarningsCheck() { + return false; + } + + // This method is using the Joda BWC layer. When that's removed, this method + // can be deleted - we'll just validate the Java time formats after that. + // Also remove enableWarningsCheck() above if this method is removed. private void validateJodaTimestampFormats(List jodaTimestampFormats, String text, long expectedEpochMs) { // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. // This is the timezone that will be used for any text representations that don't include it. - org.joda.time.DateTimeZone defaultZone = org.joda.time.DateTimeZone.forID("Europe/London"); - org.joda.time.DateTime parsed; + ZoneId defaultZone = ZoneId.of("Europe/London"); + long actualEpochMs; for (int i = 0; i < jodaTimestampFormats.size(); ++i) { try { String timestampFormat = jodaTimestampFormats.get(i); switch (timestampFormat) { case "ISO8601": - parsed = org.joda.time.format.ISODateTimeFormat.dateTimeParser() - .withZone(defaultZone).withDefaultYear(2018).parseDateTime(text); + actualEpochMs = Joda.forPattern("date_optional_time").withZone(defaultZone).parseMillis(text); break; default: - org.joda.time.format.DateTimeFormatter parser = - org.joda.time.format.DateTimeFormat.forPattern(timestampFormat).withZone(defaultZone).withLocale(Locale.ROOT); - parsed = parser.withDefaultYear(2018).parseDateTime(text); + actualEpochMs = Joda.forPattern(timestampFormat).withYear(2018).withZone(defaultZone).parseMillis(text); break; } - if (expectedEpochMs == parsed.getMillis()) { + if (expectedEpochMs == actualEpochMs) { break; } // If the last one isn't right then propagate if (i == jodaTimestampFormats.size() - 1) { - assertEquals(expectedEpochMs, parsed.getMillis()); + assertEquals(expectedEpochMs, actualEpochMs); } } catch (RuntimeException e) { // If the last one throws then propagate @@ -309,8 +324,8 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // All the test times are for Tue May 15 2018 16:14:56 UTC, which is 17:14:56 in London. // This is the timezone that will be used for any text representations that don't include it. 
- java.time.ZoneId defaultZone = java.time.ZoneId.of("Europe/London"); - java.time.temporal.TemporalAccessor parsed; + ZoneId defaultZone = ZoneId.of("Europe/London"); + TemporalAccessor parsed; for (int i = 0; i < javaTimestampFormats.size(); ++i) { try { String timestampFormat = javaTimestampFormats.get(i); @@ -319,8 +334,8 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str parsed = DateFormatter.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); break; default: - java.time.format.DateTimeFormatter parser = new java.time.format.DateTimeFormatterBuilder() - .appendPattern(timestampFormat).parseDefaulting(java.time.temporal.ChronoField.YEAR_OF_ERA, 2018) + DateTimeFormatter parser = new DateTimeFormatterBuilder() + .appendPattern(timestampFormat).parseDefaulting(ChronoField.YEAR_OF_ERA, 2018) .toFormatter(Locale.ROOT); // This next line parses the textual date without any default timezone, so if // the text doesn't contain the timezone then the resulting temporal accessor @@ -332,14 +347,14 @@ private void validateJavaTimestampFormats(List javaTimestampFormats, Str // timezone and then again with a default timezone if the first parse didn't // find one in the text. parsed = parser.parse(text); - if (parsed.query(java.time.temporal.TemporalQueries.zone()) == null) { + if (parsed.query(TemporalQueries.zone()) == null) { // TODO: when Java 8 is no longer supported remove the two // lines and comment above and the closing brace below parsed = parser.withZone(defaultZone).parse(text); } break; } - long actualEpochMs = java.time.Instant.from(parsed).toEpochMilli(); + long actualEpochMs = Instant.from(parsed).toEpochMilli(); if (expectedEpochMs == actualEpochMs) { break; } From 780b4c72fe90c83ce08b7c5b96f70e91f53b1f73 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sat, 26 Jan 2019 22:01:30 -0500 Subject: [PATCH 11/57] Make ChannelActionListener a top-level class (#37797) We start using this class more often. Let's make it a top-level class. 
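As a usage sketch (the request, response, and action-name values here are
hypothetical; only the ChannelActionListener constructor call mirrors this
patch), a transport handler can now adapt its TransportChannel into a plain
ActionListener instead of hand-writing the sendResponse try/catch at each call
site:

    // ExampleRequest, ExampleResponse, and executeAsync are placeholders for this
    // sketch, not real classes in the codebase.
    class ExampleRequestHandler implements TransportRequestHandler<ExampleRequest> {

        @Override
        public void messageReceived(ExampleRequest request, TransportChannel channel, Task task) {
            // On success the listener sends the response over the channel; on
            // failure it sends the exception back, logging if even that send fails.
            ActionListener<ExampleResponse> listener =
                new ChannelActionListener<>(channel, "indices:example/action", request);
            executeAsync(request, listener);
        }
    }
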
--- .../action/search/SearchTransportService.java | 2 +- .../action/support/ChannelActionListener.java | 62 +++++++++++++++++++ .../support/HandledTransportAction.java | 38 ------------ .../shard/TransportSingleShardAction.java | 7 +-- .../recovery/PeerRecoverySourceService.java | 4 +- .../recovery/PeerRecoveryTargetService.java | 13 ++-- 6 files changed, 73 insertions(+), 53 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 577ce4f6b7aec..7d2dd9a22b257 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.support.HandledTransportAction.ChannelActionListener; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; diff --git a/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java new file mode 100644 index 0000000000000..b23758758e24d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/ChannelActionListener.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.support; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportResponse; + +public final class ChannelActionListener< + Response extends TransportResponse, Request extends TransportRequest> implements ActionListener { + + private static final Logger logger = LogManager.getLogger(ChannelActionListener.class); + private final TransportChannel channel; + private final Request request; + private final String actionName; + + public ChannelActionListener(TransportChannel channel, String actionName, Request request) { + this.channel = channel; + this.request = request; + this.actionName = actionName; + } + + @Override + public void onResponse(Response response) { + try { + channel.sendResponse(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index f1f4962851c99..0b35bc8fb89d6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -18,19 +18,14 @@ */ package org.elasticsearch.action.support; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -74,37 +69,4 @@ public final void messageReceived(final Request request, final TransportChannel } } - public static final class ChannelActionListener implements - ActionListener { - private final Logger logger = LogManager.getLogger(getClass()); - private final TransportChannel channel; - private final Request request; - private final String actionName; - - public ChannelActionListener(TransportChannel channel, String actionName, Request request) { - this.channel = channel; - this.request = request; - this.actionName = actionName; - } - - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn(() -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); - } - } - } - } diff --git 
a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index a028c3f0e1e57..6d7ad085dcd1e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; @@ -285,7 +285,7 @@ private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, final TransportChannel channel, Task task) throws Exception { // if we have a local operation, execute it on a thread since we don't spawn - execute(request, new HandledTransportAction.ChannelActionListener<>(channel, actionName, request)); + execute(request, new ChannelActionListener<>(channel, actionName, request)); } } @@ -296,8 +296,7 @@ public void messageReceived(final Request request, final TransportChannel channe if (logger.isTraceEnabled()) { logger.trace("executing [{}] on shard [{}]", request, request.internalShardId); } - asyncShardOperation(request, request.internalShardId, new HandledTransportAction.ChannelActionListener<>(channel, - transportShardAction, request)); + asyncShardOperation(request, request.internalShardId, new ChannelActionListener<>(channel, transportShardAction, request)); } } /** diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index 556df71ca2cb9..f53e8edecd9e6 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; @@ -109,7 +109,7 @@ private void recover(StartRecoveryRequest request, ActionListener { @Override public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel, Task task) throws Exception { - recover(request, new HandledTransportAction.ChannelActionListener<>(channel, Actions.START_RECOVERY, request)); + recover(request, new ChannelActionListener<>(channel, Actions.START_RECOVERY, request)); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index bc2196501fb8e..dbbaed1132e62 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ 
b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -30,7 +30,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; @@ -433,8 +433,7 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = - new HandledTransportAction.ChannelActionListener<>(channel, Actions.PREPARE_TRANSLOG, request); + final ActionListener listener = new ChannelActionListener<>(channel, Actions.PREPARE_TRANSLOG, request); recoveryRef.target().prepareForTranslogOperations(request.isFileBasedRecovery(), request.totalTranslogOps(), ActionListener.wrap(nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE), listener::onFailure)); } @@ -446,8 +445,7 @@ class FinalizeRecoveryRequestHandler implements TransportRequestHandler listener = - new HandledTransportAction.ChannelActionListener<>(channel, Actions.FINALIZE, request); + final ActionListener listener = new ChannelActionListener<>(channel, Actions.FINALIZE, request); recoveryRef.target().finalizeRecovery(request.globalCheckpoint(), ActionListener.wrap(nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE), listener::onFailure)); } @@ -489,7 +487,7 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); final RecoveryTarget recoveryTarget = recoveryRef.target(); final ActionListener listener = - new HandledTransportAction.ChannelActionListener<>(channel, Actions.TRANSLOG_OPS, request); + new ChannelActionListener<>(channel, Actions.TRANSLOG_OPS, request); final Consumer retryOnMappingException = exception -> { // in very rare cases a translog replay from primary is processed before a mapping update on this node // which causes local mapping changes since the mapping (clusterstate) might not have arrived on this node. @@ -626,8 +624,7 @@ public void messageReceived(final RecoveryFileChunkRequest request, TransportCha recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); } } - final ActionListener listener = - new HandledTransportAction.ChannelActionListener<>(channel, Actions.FILE_CHUNK, request); + final ActionListener listener = new ChannelActionListener<>(channel, Actions.FILE_CHUNK, request); recoveryTarget.writeFileChunk(request.metadata(), request.position(), request.content(), request.lastChunk(), request.totalTranslogOps(), ActionListener.wrap(nullVal -> listener.onResponse(TransportResponse.Empty.INSTANCE), listener::onFailure)); From cb134470c117794603fcdf6df223621c1869abd6 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Sun, 27 Jan 2019 08:27:40 +0000 Subject: [PATCH 12/57] [TEST] Fix MlMappingsUpgradeIT testMappingsUpgrade (#37769) Made the test tolerant to index upgrade being run in between the old/mixed/upgraded portions. 
This can occur because the rolling upgrade tests all share the same indices. Fixes #37763 --- .../upgrades/MlMappingsUpgradeIT.java | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java index ca41afe6c39da..4bded9a25c56c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java @@ -38,7 +38,6 @@ protected Collection templatesToWaitFor() { * The purpose of this test is to ensure that when a job is open through a rolling upgrade we upgrade the results * index mappings when it is assigned to an upgraded node even if no other ML endpoint is called after the upgrade */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37763") public void testMappingsUpgrade() throws Exception { switch (CLUSTER_TYPE) { @@ -65,6 +64,8 @@ private void createAndOpenTestJob() throws IOException { Job.Builder job = new Job.Builder(JOB_ID); job.setAnalysisConfig(analysisConfig); job.setDataDescription(new DataDescription.Builder()); + // Use a custom index because other rolling upgrade tests meddle with the shared index + job.setResultsIndexName("mappings-upgrade-test"); Request putJob = new Request("PUT", "_ml/anomaly_detectors/" + JOB_ID); putJob.setJsonEntity(Strings.toString(job.build())); @@ -85,7 +86,16 @@ private void assertUpgradedMappings() throws Exception { Map responseLevel = entityAsMap(response); assertNotNull(responseLevel); - Map indexLevel = (Map) responseLevel.get(".ml-anomalies-shared"); + Map indexLevel = null; + // The name of the concrete index underlying the results index alias may or may not have been changed + // by the upgrade process (depending on what other tests are being run and the order they're run in), + // so navigating to the next level of the tree must account for both cases + for (Map.Entry entry : responseLevel.entrySet()) { + if (entry.getKey().startsWith(".ml-anomalies-") && entry.getKey().contains("mappings-upgrade-test")) { + indexLevel = (Map) entry.getValue(); + break; + } + } assertNotNull(indexLevel); Map mappingsLevel = (Map) indexLevel.get("mappings"); assertNotNull(mappingsLevel); From 5fddb631a2b7e1be52233451b442eda70da88e3a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sun, 27 Jan 2019 07:49:56 -0500 Subject: [PATCH 13/57] Introduce retention lease syncing (#37398) This commit introduces retention lease syncing from the primary to its replicas when a new retention lease is added. A follow-up commit will add a background sync of the retention leases as well so that renewed retention leases are synced to replicas. 
--- .../replication/TransportWriteAction.java | 2 +- .../org/elasticsearch/index/IndexService.java | 8 +- .../index/seqno/ReplicationTracker.java | 76 +++++- .../index/seqno/RetentionLeaseSyncAction.java | 206 +++++++++++++++ .../index/seqno/RetentionLeaseSyncer.java | 48 ++++ .../elasticsearch/index/shard/IndexShard.java | 54 +++- .../elasticsearch/indices/IndicesService.java | 7 +- .../cluster/IndicesClusterStateService.java | 21 +- ...ReplicationTrackerRetentionLeaseTests.java | 79 ++++-- .../seqno/ReplicationTrackerTestCase.java | 3 +- .../index/seqno/ReplicationTrackerTests.java | 9 +- .../seqno/RetentionLeaseSyncActionTests.java | 237 ++++++++++++++++++ .../index/seqno/RetentionLeaseSyncIT.java | 96 +++++++ .../index/shard/IndexShardIT.java | 1 + .../shard/IndexShardRetentionLeaseTests.java | 15 +- ...dicesLifecycleListenerSingleNodeTests.java | 2 +- ...actIndicesClusterStateServiceTestCase.java | 4 +- ...ClusterStateServiceRandomUpdatesTests.java | 3 +- .../snapshots/SnapshotsServiceTests.java | 10 + .../index/engine/EngineTestCase.java | 12 +- .../index/shard/IndexShardTestCase.java | 1 + .../authz/privilege/SystemPrivilege.java | 1 + .../authz/privilege/PrivilegeTests.java | 3 + .../authz/AuthorizationServiceTests.java | 11 +- 24 files changed, 855 insertions(+), 54 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java create mode 100644 server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java create mode 100644 server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java create mode 100644 server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index 134d130fddcbe..279a616160000 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -189,7 +189,7 @@ public synchronized void onSuccess(boolean forcedRefresh) { /** * Result of taking the action on the replica. 
*/ - protected static class WriteReplicaResult> + public static class WriteReplicaResult> extends ReplicaResult implements RespondingWriteResult { public final Location location; boolean finishedAsyncActions; diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 3eaad1eee5460..604d27a1e70c8 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -54,6 +54,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexShard; @@ -310,7 +311,11 @@ private long getAvgShardSizeInBytes() throws IOException { } } - public synchronized IndexShard createShard(ShardRouting routing, Consumer globalCheckpointSyncer) throws IOException { + public synchronized IndexShard createShard( + final ShardRouting routing, + final Consumer globalCheckpointSyncer, + final RetentionLeaseSyncer retentionLeaseSyncer) throws IOException { + Objects.requireNonNull(retentionLeaseSyncer); /* * TODO: we execute this in parallel but it's a synced method. Yet, we might * be able to serialize the execution via the cluster state in the future. for now we just @@ -398,6 +403,7 @@ public synchronized IndexShard createShard(ShardRouting routing, Consumer globalCheckpointSyncer.accept(shardId), + (retentionLeases, listener) -> retentionLeaseSyncer.syncRetentionLeasesForShard(shardId, retentionLeases, listener), circuitBreakerService); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index f309512ec98b6..85d5f6f62c61d 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -22,6 +22,8 @@ import com.carrotsearch.hppc.ObjectLongHashMap; import com.carrotsearch.hppc.ObjectLongMap; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -35,6 +37,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -43,6 +46,7 @@ import java.util.Objects; import java.util.OptionalLong; import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -142,6 +146,12 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L */ private final LongSupplier currentTimeMillisSupplier; + /** + * A callback when a new retention lease is created. 
In practice, this callback invokes the retention lease sync action, to sync + * retention leases to replicas. + */ + private final BiConsumer, ActionListener> onNewRetentionLease; + /** * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the * current global checkpoint. @@ -156,7 +166,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final Map retentionLeases = new HashMap<>(); /** - * Get all non-expired retention leases tracker on this shard. An unmodifiable copy of the retention leases is returned. + * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. * * @return the retention leases */ @@ -174,15 +184,60 @@ public synchronized Collection getRetentionLeases() { } /** - * Adds a new or updates an existing retention lease. + * Adds a new retention lease. + * + * @param id the identifier of the retention lease + * @param retainingSequenceNumber the retaining sequence number + * @param source the source of the retention lease + * @param listener the callback when the retention lease is successfully added and synced to replicas + * @return the new retention lease + * @throws IllegalArgumentException if the specified retention lease already exists + */ + public RetentionLease addRetentionLease( + final String id, + final long retainingSequenceNumber, + final String source, + final ActionListener listener) { + Objects.requireNonNull(listener); + final RetentionLease retentionLease; + final Collection currentRetentionLeases; + synchronized (this) { + assert primaryMode; + if (retentionLeases.containsKey(id)) { + throw new IllegalArgumentException("retention lease with ID [" + id + "] already exists"); + } + retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); + retentionLeases.put(id, retentionLease); + currentRetentionLeases = retentionLeases.values(); + } + onNewRetentionLease.accept(Collections.unmodifiableCollection(new ArrayList<>(currentRetentionLeases)), listener); + return retentionLease; + } + + /** + * Renews an existing retention lease. 
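+ * <p>
+ * A sketch of the intended calling pattern (illustrative only; the lease ID, sequence numbers, and the
+ * {@code tracker} variable are assumptions, not part of this change):
+ * <pre>{@code
+ * tracker.addRetentionLease("lease-0", 7L, "source", ActionListener.wrap(() -> {})); // create exactly once
+ * tracker.renewRetentionLease("lease-0", 8L, "source"); // must not lower the retaining sequence number
+ * tracker.renewRetentionLease("unknown", 1L, "source"); // throws IllegalArgumentException: no such lease
+ * }</pre>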
* * @param id the identifier of the retention lease * @param retainingSequenceNumber the retaining sequence number * @param source the source of the retention lease + * @return the renewed retention lease + * @throws IllegalArgumentException if the specified retention lease does not exist */ - public synchronized void addOrUpdateRetentionLease(final String id, final long retainingSequenceNumber, final String source) { + public synchronized RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert primaryMode; - retentionLeases.put(id, new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source)); + if (retentionLeases.containsKey(id) == false) { + throw new IllegalArgumentException("retention lease with ID [" + id + "] does not exist"); + } + final RetentionLease retentionLease = + new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source); + final RetentionLease existingRetentionLease = retentionLeases.put(id, retentionLease); + assert existingRetentionLease != null; + assert existingRetentionLease.retainingSequenceNumber() <= retentionLease.retainingSequenceNumber() : + "retention lease renewal for [" + id + "]" + + " from [" + source + "]" + + " renewed a lower retaining sequence number [" + retentionLease.retainingSequenceNumber() + "]" + + " than the current lease retaining sequence number [" + existingRetentionLease.retainingSequenceNumber() + "]"; + return retentionLease; } /** @@ -440,10 +495,11 @@ private static long inSyncCheckpointStates( * Initialize the global checkpoint service. The specified global checkpoint should be set to the last known global checkpoint, or * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. 
* - * @param shardId the shard ID - * @param allocationId the allocation ID - * @param indexSettings the index settings - * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} + * @param shardId the shard ID + * @param allocationId the allocation ID + * @param indexSettings the index settings + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} + * @param onNewRetentionLease a callback when a new retention lease is created */ public ReplicationTracker( final ShardId shardId, @@ -451,7 +507,8 @@ public ReplicationTracker( final IndexSettings indexSettings, final long globalCheckpoint, final LongConsumer onGlobalCheckpointUpdated, - final LongSupplier currentTimeMillisSupplier) { + final LongSupplier currentTimeMillisSupplier, + final BiConsumer, ActionListener> onNewRetentionLease) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; this.shardAllocationId = allocationId; @@ -462,6 +519,7 @@ public ReplicationTracker( checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false, false)); this.onGlobalCheckpointUpdated = Objects.requireNonNull(onGlobalCheckpointUpdated); this.currentTimeMillisSupplier = Objects.requireNonNull(currentTimeMillisSupplier); + this.onNewRetentionLease = Objects.requireNonNull(onNewRetentionLease); this.pendingInSync = new HashSet<>(); this.routingTable = null; this.replicationGroup = null; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java new file mode 100644 index 0000000000000..3b7df41f72d05 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncAction.java @@ -0,0 +1,206 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.seqno; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collection; +import java.util.Objects; + +/** + * Write action responsible for syncing retention leases to replicas. This action is deliberately a write action so that if a replica misses + * a retention lease sync then that shard will be marked as stale. + */ +public class RetentionLeaseSyncAction extends + TransportWriteAction { + + public static String ACTION_NAME = "indices:admin/seq_no/retention_lease_sync"; + + private static final Logger LOGGER = LogManager.getLogger(RetentionLeaseSyncAction.class); + + protected Logger getLogger() { + return LOGGER; + } + + @Inject + public RetentionLeaseSyncAction( + final Settings settings, + final TransportService transportService, + final ClusterService clusterService, + final IndicesService indicesService, + final ThreadPool threadPool, + final ShardStateAction shardStateAction, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver) { + super( + settings, + ACTION_NAME, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver, + RetentionLeaseSyncAction.Request::new, + RetentionLeaseSyncAction.Request::new, + ThreadPool.Names.MANAGEMENT); + } + + /** + * Sync the specified retention leases for the specified shard. The callback is invoked when the sync succeeds or fails. 
+ * + * @param shardId the shard to sync + * @param retentionLeases the retention leases to sync + * @param listener the callback to invoke when the sync completes normally or abnormally + */ + public void syncRetentionLeasesForShard( + final ShardId shardId, + final Collection retentionLeases, + final ActionListener listener) { + Objects.requireNonNull(shardId); + Objects.requireNonNull(retentionLeases); + Objects.requireNonNull(listener); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the sync is authorized + threadContext.markAsSystemContext(); + execute( + new RetentionLeaseSyncAction.Request(shardId, retentionLeases), + ActionListener.wrap( + listener::onResponse, + e -> { + if (ExceptionsHelper.unwrap(e, AlreadyClosedException.class, IndexShardClosedException.class) == null) { + getLogger().warn(new ParameterizedMessage("{} retention lease sync failed", shardId), e); + } + listener.onFailure(e); + })); + } + } + + @Override + protected WritePrimaryResult shardOperationOnPrimary(final Request request, final IndexShard primary) { + Objects.requireNonNull(request); + Objects.requireNonNull(primary); + // we flush to ensure that retention leases are committed + flush(primary); + return new WritePrimaryResult<>(request, new Response(), null, null, primary, logger); + } + + @Override + protected WriteReplicaResult shardOperationOnReplica(final Request request, final IndexShard replica) { + Objects.requireNonNull(request); + Objects.requireNonNull(replica); + replica.updateRetentionLeasesOnReplica(request.getRetentionLeases()); + // we flush to ensure that retention leases are committed + flush(replica); + return new WriteReplicaResult<>(request, null, null, replica, logger); + } + + private void flush(final IndexShard indexShard) { + final FlushRequest flushRequest = new FlushRequest(); + flushRequest.force(true); + flushRequest.waitIfOngoing(true); + indexShard.flush(flushRequest); + } + + public static final class Request extends ReplicatedWriteRequest { + + private Collection retentionLeases; + + public Collection getRetentionLeases() { + return retentionLeases; + } + + public Request() { + + } + + public Request(final ShardId shardId, final Collection retentionLeases) { + super(Objects.requireNonNull(shardId)); + this.retentionLeases = Objects.requireNonNull(retentionLeases); + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + retentionLeases = in.readList(RetentionLease::new); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(Objects.requireNonNull(out)); + out.writeCollection(retentionLeases); + } + + @Override + public String toString() { + return "Request{" + + "retentionLeases=" + retentionLeases + + ", shardId=" + shardId + + ", timeout=" + timeout + + ", index='" + index + '\'' + + ", waitForActiveShards=" + waitForActiveShards + + '}'; + } + + } + + public static final class Response extends ReplicationResponse implements WriteResponse { + + @Override + public void setForcedRefresh(final boolean forcedRefresh) { + // ignore + } + + } + + @Override + protected Response newResponseInstance() { + return new Response(); + } + +} diff --git a/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java new 
file mode 100644 index 0000000000000..1e276eb98adaf --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/seqno/RetentionLeaseSyncer.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.index.shard.ShardId; + +import java.util.Collection; + +/** + * A functional interface that represents a method for syncing retention leases to replica shards after a new retention lease is added on + * the primary. + */ +@FunctionalInterface +public interface RetentionLeaseSyncer { + + /** + * Represents a method that when invoked syncs retention leases to replica shards after a new retention lease is added on the primary. + * The specified listener is invoked when the syncing completes with success or failure. + * + * @param shardId the shard ID + * @param retentionLeases the retention leases to sync + * @param listener the callback when sync completes + */ + void syncRetentionLeasesForShard( + ShardId shardId, + Collection retentionLeases, + ActionListener listener); + +} diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f92eb38349246..dc43d42c94a5c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -40,6 +40,7 @@ import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; @@ -266,6 +267,7 @@ public IndexShard( final List searchOperationListener, final List listeners, final Runnable globalCheckpointSyncer, + final BiConsumer, ActionListener> retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -313,7 +315,8 @@ public IndexShard( indexSettings, UNASSIGNED_SEQ_NO, globalCheckpointListeners::globalCheckpointUpdated, - threadPool::absoluteTimeInMillis); + threadPool::absoluteTimeInMillis, + retentionLeaseSyncer); // the query cache is a node-level thing, however we want the most popular filters // to be computed on a per-shard basis @@ -1882,18 +1885,61 @@ public void addGlobalCheckpointListener( 
this.globalCheckpointListeners.add(waitingForGlobalCheckpoint, listener, timeout); } + /** + * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. + * + * @return the retention leases + */ + public Collection getRetentionLeases() { + verifyNotClosed(); + return replicationTracker.getRetentionLeases(); + } /** - * Adds a new or updates an existing retention lease. + * Adds a new retention lease. * * @param id the identifier of the retention lease * @param retainingSequenceNumber the retaining sequence number * @param source the source of the retention lease + * @param listener the callback when the retention lease is successfully added and synced to replicas + * @return the new retention lease + * @throws IllegalArgumentException if the specified retention lease already exists + */ + public RetentionLease addRetentionLease( + final String id, + final long retainingSequenceNumber, + final String source, + final ActionListener listener) { + Objects.requireNonNull(listener); + assert assertPrimaryMode(); + verifyNotClosed(); + return replicationTracker.addRetentionLease(id, retainingSequenceNumber, source, listener); + } + + /** + * Renews an existing retention lease. + * + * @param id the identifier of the retention lease + * @param retainingSequenceNumber the retaining sequence number + * @param source the source of the retention lease + * @return the renewed retention lease + * @throws IllegalArgumentException if the specified retention lease does not exist */ - void addOrUpdateRetentionLease(final String id, final long retainingSequenceNumber, final String source) { + public RetentionLease renewRetentionLease(final String id, final long retainingSequenceNumber, final String source) { assert assertPrimaryMode(); verifyNotClosed(); - replicationTracker.addOrUpdateRetentionLease(id, retainingSequenceNumber, source); + return replicationTracker.renewRetentionLease(id, retainingSequenceNumber, source); + } + + /** + * Updates retention leases on a replica. 
+ * + * @param retentionLeases the retention leases + */ + public void updateRetentionLeasesOnReplica(final Collection retentionLeases) { + assert assertReplicationTarget(); + verifyNotClosed(); + replicationTracker.updateRetentionLeasesOnReplica(retentionLeases); } /** diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index b881ba73a28e6..fa42776403dca 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -94,6 +94,7 @@ import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.search.stats.SearchStats; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IllegalIndexShardStateException; import org.elasticsearch.index.shard.IndexEventListener; @@ -592,10 +593,12 @@ public IndexShard createShard( final PeerRecoveryTargetService.RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, - final Consumer globalCheckpointSyncer) throws IOException { + final Consumer globalCheckpointSyncer, + final RetentionLeaseSyncer retentionLeaseSyncer) throws IOException { + Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); IndexService indexService = indexService(shardRouting.index()); - IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer); + IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, (type, mapping) -> { diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index c8afe92be8d37..80ac05ece8274 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -56,6 +56,8 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.ReplicationTracker; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardRelocatedException; @@ -83,6 +85,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -121,6 +124,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final List buildInIndexListener; private final PrimaryReplicaSyncer primaryReplicaSyncer; private final Consumer globalCheckpointSyncer; + private final RetentionLeaseSyncer retentionLeaseSyncer; @Inject public IndicesClusterStateService( @@ -137,7 +141,8 @@ public IndicesClusterStateService( final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final 
PrimaryReplicaSyncer primaryReplicaSyncer, - final GlobalCheckpointSyncAction globalCheckpointSyncAction) { + final GlobalCheckpointSyncAction globalCheckpointSyncAction, + final RetentionLeaseSyncAction retentionLeaseSyncAction) { this( settings, (AllocatedIndices>) indicesService, @@ -152,7 +157,8 @@ public IndicesClusterStateService( peerRecoverySourceService, snapshotShardsService, primaryReplicaSyncer, - globalCheckpointSyncAction::updateGlobalCheckpointForShard); + globalCheckpointSyncAction::updateGlobalCheckpointForShard, + Objects.requireNonNull(retentionLeaseSyncAction)::syncRetentionLeasesForShard); } // for tests @@ -170,7 +176,8 @@ public IndicesClusterStateService( final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, - final Consumer globalCheckpointSyncer) { + final Consumer globalCheckpointSyncer, + final RetentionLeaseSyncer retentionLeaseSyncer) { this.settings = settings; this.buildInIndexListener = Arrays.asList( @@ -188,6 +195,7 @@ public IndicesClusterStateService( this.repositoriesService = repositoriesService; this.primaryReplicaSyncer = primaryReplicaSyncer; this.globalCheckpointSyncer = globalCheckpointSyncer; + this.retentionLeaseSyncer = Objects.requireNonNull(retentionLeaseSyncer); this.sendRefreshMapping = settings.getAsBoolean("indices.cluster.send_refresh_mapping", true); } @@ -576,7 +584,8 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR new RecoveryListener(shardRouting), repositoriesService, failedShardHandler, - globalCheckpointSyncer); + globalCheckpointSyncer, + retentionLeaseSyncer); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed to create shard", e, state); } @@ -870,6 +879,7 @@ U createIndex(IndexMetaData indexMetaData, * @param repositoriesService service responsible for snapshot/restore * @param onShardFailure a callback when this shard fails * @param globalCheckpointSyncer a callback when this shard syncs the global checkpoint + * @param retentionLeaseSyncer a callback when this shard syncs retention leases * @return a new shard * @throws IOException if an I/O exception occurs when creating the shard */ @@ -880,7 +890,8 @@ T createShard( PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer onShardFailure, - Consumer globalCheckpointSyncer) throws IOException; + Consumer globalCheckpointSyncer, + RetentionLeaseSyncer retentionLeaseSyncer) throws IOException; /** * Returns shard for the specified id if it exists otherwise returns null. 
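Taken together, these changes thread a retention lease syncer from IndicesClusterStateService through IndicesService and IndexService down to each IndexShard and its ReplicationTracker. As a minimal sketch of the resulting primary-side API, modeled on the integration test further below (the lease ID, source, and the failure handling are illustrative assumptions, not part of this patch):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.index.seqno.RetentionLease;
import org.elasticsearch.index.shard.IndexShard;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

final class RetentionLeaseUsageSketch {

    /**
     * Adds a lease on the primary, blocks until it has been synced (and flushed) on all copies,
     * then renews it with a higher retaining sequence number.
     */
    static RetentionLease addThenRenew(final IndexShard primary, final long retainingSequenceNumber) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<Exception> failure = new AtomicReference<>();
        final ActionListener<ReplicationResponse> listener =
                ActionListener.wrap(r -> latch.countDown(), e -> {
                    failure.set(e);
                    latch.countDown();
                });
        // throws IllegalArgumentException if a lease with this ID already exists
        primary.addRetentionLease("sketch-lease", retainingSequenceNumber, "sketch-source", listener);
        latch.await();
        if (failure.get() != null) {
            throw failure.get();
        }
        // subsequent updates go through renew, which requires the lease to already exist
        return primary.renewRetentionLease("sketch-lease", retainingSequenceNumber + 1, "sketch-source");
    }

}

Because the sync is a replicated write action, a replica that fails to apply the update is marked as stale, so a successful listener callback implies every in-sync copy holds the leases both in memory and in its last commit.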
diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 2854cc87d8695..3dafb93d65400 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -30,8 +31,11 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.LongSupplier; +import java.util.stream.Collectors; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; @@ -41,39 +45,83 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTestCase { - public void testAddOrUpdateRetentionLease() { - final AllocationId id = AllocationId.newInitializing(); + public void testAddOrRenewRetentionLease() { + final AllocationId allocationId = AllocationId.newInitializing(); final ReplicationTracker replicationTracker = new ReplicationTracker( new ShardId("test", "_na", 0), - id.getId(), + allocationId.getId(), IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), UNASSIGNED_SEQ_NO, value -> {}, - () -> 0L); + () -> 0L, + (leases, listener) -> {}); replicationTracker.updateFromMaster( randomNonNegativeLong(), - Collections.singleton(id.getId()), - routingTable(Collections.emptySet(), id), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), Collections.emptySet()); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - replicationTracker.addOrUpdateRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); + replicationTracker.addRetentionLease( + Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); assertRetentionLeases(replicationTracker, i + 1, minimumRetainingSequenceNumbers, () -> 0L); } for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE); - replicationTracker.addOrUpdateRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); + replicationTracker.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); assertRetentionLeases(replicationTracker, length, minimumRetainingSequenceNumbers, () -> 0L); } + } + + public void testOnNewRetentionLease() { + final AllocationId allocationId = AllocationId.newInitializing(); + final Map retentionLeases = new HashMap<>(); + final AtomicBoolean invoked = new AtomicBoolean(); + final AtomicReference reference = new AtomicReference<>(); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new 
ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + UNASSIGNED_SEQ_NO, + value -> {}, + () -> 0L, + (leases, listener) -> { + // we do not want to hold a lock on the replication tracker in the callback! + assertFalse(Thread.holdsLock(reference.get())); + invoked.set(true); + assertThat( + leases.stream().collect(Collectors.toMap(RetentionLease::id, RetentionLease::retainingSequenceNumber)), + equalTo(retentionLeases)); + }); + reference.set(replicationTracker); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retentionLeases.put(id, retainingSequenceNumber); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); + // assert that the new retention lease callback was invoked + assertTrue(invoked.get()); + // reset the invocation marker so that we can assert the callback was not invoked when renewing the lease + invoked.set(false); + replicationTracker.renewRetentionLease(id, retainingSequenceNumber, "test"); + assertFalse(invoked.get()); + } } public void testExpiration() { - final AllocationId id = AllocationId.newInitializing(); + final AllocationId allocationId = AllocationId.newInitializing(); final AtomicLong currentTimeMillis = new AtomicLong(randomLongBetween(0, 1024)); final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis()); final Settings settings = Settings @@ -82,20 +130,21 @@ public void testExpiration() { .build(); final ReplicationTracker replicationTracker = new ReplicationTracker( new ShardId("test", "_na", 0), - id.getId(), + allocationId.getId(), IndexSettingsModule.newIndexSettings("test", settings), UNASSIGNED_SEQ_NO, value -> {}, - currentTimeMillis::get); + currentTimeMillis::get, + (leases, listener) -> {}); replicationTracker.updateFromMaster( randomNonNegativeLong(), - Collections.singleton(id.getId()), - routingTable(Collections.emptySet(), id), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), Collections.emptySet()); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final long[] retainingSequenceNumbers = new long[1]; retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - replicationTracker.addOrUpdateRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + replicationTracker.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); { final Collection retentionLeases = replicationTracker.getRetentionLeases(); @@ -108,7 +157,7 @@ public void testExpiration() { // renew the lease currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(0, 1024)); retainingSequenceNumbers[0] = randomLongBetween(retainingSequenceNumbers[0], Long.MAX_VALUE); - replicationTracker.addOrUpdateRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + replicationTracker.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); { final Collection retentionLeases = replicationTracker.getRetentionLeases(); diff 
--git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTestCase.java index 9b1f951a030fe..a36006a5fc4c1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTestCase.java @@ -47,7 +47,8 @@ ReplicationTracker newTracker( IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, - currentTimeMillisSupplier); + currentTimeMillisSupplier, + (leases, listener) -> {}); } static IndexShardRoutingTable routingTable(final Set initializingIds, final AllocationId primaryId) { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index 001e50af57c79..b61e3f647b9d2 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.index.seqno; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; @@ -47,6 +49,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongConsumer; import java.util.stream.Collectors; @@ -683,10 +686,12 @@ public void testPrimaryContextHandoff() throws IOException { final AllocationId primaryAllocationId = clusterState.routingTable.primaryShard().allocationId(); final LongConsumer onUpdate = updatedGlobalCheckpoint -> {}; final long globalCheckpoint = UNASSIGNED_SEQ_NO; + final BiConsumer, ActionListener> onNewRetentionLease = + (leases, listener) -> {}; ReplicationTracker oldPrimary = new ReplicationTracker( - shardId, primaryAllocationId.getId(), indexSettings, globalCheckpoint, onUpdate, () -> 0L); + shardId, primaryAllocationId.getId(), indexSettings, globalCheckpoint, onUpdate, () -> 0L, onNewRetentionLease); ReplicationTracker newPrimary = new ReplicationTracker( - shardId, primaryAllocationId.getRelocationId(), indexSettings, globalCheckpoint, onUpdate, () -> 0L); + shardId, primaryAllocationId.getRelocationId(), indexSettings, globalCheckpoint, onUpdate, () -> 0L, onNewRetentionLease); Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java new file mode 100644 index 0000000000000..898309524b4f0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache license, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the license for the specific language governing permissions and + * limitations under the license. + */ + +package org.elasticsearch.index.seqno; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardClosedException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.mockito.ArgumentCaptor; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.mock.orig.Mockito.verifyNoMoreInteractions; +import static org.elasticsearch.mock.orig.Mockito.when; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Matchers.same; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class RetentionLeaseSyncActionTests extends ESTestCase { + + private ThreadPool threadPool; + private CapturingTransport transport; + private ClusterService clusterService; + private TransportService transportService; + private ShardStateAction shardStateAction; + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + transport = new CapturingTransport(); + clusterService = createClusterService(threadPool); + transportService = transport.createTransportService(clusterService.getSettings(), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet()); + transportService.start(); + transportService.acceptIncomingRequests(); + shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + } + + public void tearDown() throws Exception { + try { + IOUtils.close(transportService, 
clusterService, transport); + } finally { + terminate(threadPool); + } + super.tearDown(); + } + + public void testRetentionLeaseSyncActionOnPrimary() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + @SuppressWarnings("unchecked") final Collection retentionLeases = + (Collection) mock(Collection.class); + final RetentionLeaseSyncAction.Request request = + new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); + + final TransportWriteAction.WritePrimaryResult result = + action.shardOperationOnPrimary(request, indexShard); + // the retention leases on the shard should be flushed + final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); + verify(indexShard).flush(flushRequest.capture()); + assertTrue(flushRequest.getValue().force()); + assertTrue(flushRequest.getValue().waitIfOngoing()); + // we should forward the request containing the current retention leases to the replica + assertThat(result.replicaRequest(), sameInstance(request)); + // we should start with an empty replication response + assertNull(result.finalResponseIfSuccessful.getShardInfo()); + } + + public void testRetentionLeaseSyncActionOnReplica() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()); + @SuppressWarnings("unchecked") final Collection retentionLeases = + (Collection) mock(Collection.class); + final RetentionLeaseSyncAction.Request request = + new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases); + + final TransportWriteAction.WriteReplicaResult result = action.shardOperationOnReplica(request, indexShard); + // the retention leases on the shard should be updated + verify(indexShard).updateRetentionLeasesOnReplica(retentionLeases); + // the retention leases on the shard should be flushed + final ArgumentCaptor flushRequest = ArgumentCaptor.forClass(FlushRequest.class); + verify(indexShard).flush(flushRequest.capture()); + assertTrue(flushRequest.getValue().force()); + assertTrue(flushRequest.getValue().waitIfOngoing()); + // the result should indicate success + final AtomicBoolean success = new AtomicBoolean(); + 
result.respond(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); + assertTrue(success.get()); + } + + public void testRetentionLeaseSyncExecution() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final Logger retentionLeaseSyncActionLogger = mock(Logger.class); + + @SuppressWarnings("unchecked") final Collection retentionLeases = + (Collection) mock(Collection.class); + final AtomicBoolean invoked = new AtomicBoolean(); + final RetentionLeaseSyncAction action = new RetentionLeaseSyncAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()), + new IndexNameExpressionResolver()) { + + @Override + protected void doExecute(Task task, Request request, ActionListener listener) { + assertTrue(threadPool.getThreadContext().isSystemContext()); + assertThat(request.shardId(), sameInstance(indexShard.shardId())); + assertThat(request.getRetentionLeases(), sameInstance(retentionLeases)); + if (randomBoolean()) { + listener.onResponse(new Response()); + } else { + final Exception e = randomFrom( + new AlreadyClosedException("closed"), + new IndexShardClosedException(indexShard.shardId()), + new RuntimeException("failed")); + listener.onFailure(e); + if (e instanceof AlreadyClosedException == false && e instanceof IndexShardClosedException == false) { + final ArgumentCaptor captor = ArgumentCaptor.forClass(ParameterizedMessage.class); + verify(retentionLeaseSyncActionLogger).warn(captor.capture(), same(e)); + final ParameterizedMessage message = captor.getValue(); + assertThat(message.getFormat(), equalTo("{} retention lease sync failed")); + assertThat(message.getParameters(), arrayContaining(indexShard.shardId())); + } + verifyNoMoreInteractions(retentionLeaseSyncActionLogger); + } + invoked.set(true); + } + + @Override + protected Logger getLogger() { + return retentionLeaseSyncActionLogger; + } + }; + + // execution happens on the test thread, so no need to register an actual listener to callback + action.syncRetentionLeasesForShard(indexShard.shardId(), retentionLeases, ActionListener.wrap(() -> {})); + assertTrue(invoked.get()); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java new file mode 100644 index 0000000000000..fad9e25db12d6 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.seqno; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; + +public class RetentionLeaseSyncIT extends ESIntegTestCase { + + public void testRetentionLeasesSyncedOnAdd() throws Exception { + final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", numberOfReplicas) + .build(); + createIndex("index", settings); + ensureGreen("index"); + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + // we will add multiple retention leases and expect to see them synced to all replicas + final int length = randomIntBetween(1, 8); + final Map currentRetentionLeases = new HashMap<>(); + for (int i = 0; i < length; i++) { + final String id = randomValueOtherThanMany(currentRetentionLeases.keySet()::contains, () -> randomAlphaOfLength(8)); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + currentRetentionLeases.put(id, primary.addRetentionLease(id, retainingSequenceNumber, source, listener)); + latch.await(); + + // check retention leases have been committed on the primary + final Collection primaryCommittedRetentionLeases = RetentionLease.decodeRetentionLeases( + primary.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); + assertThat(currentRetentionLeases, equalTo(toMap(primaryCommittedRetentionLeases))); + + // check current retention leases have been synced to all replicas + for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { + final String replicaShardNodeId = replicaShard.currentNodeId(); + final String replicaShardNodeName = 
clusterService().state().nodes().get(replicaShardNodeId).getName(); + final IndexShard replica = internalCluster() + .getInstance(IndicesService.class, replicaShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + final Map retentionLeasesOnReplica = toMap(replica.getRetentionLeases()); + assertThat(retentionLeasesOnReplica, equalTo(currentRetentionLeases)); + + // check retention leases have been committed on the replica + final Collection replicaCommittedRetentionLeases = RetentionLease.decodeRetentionLeases( + replica.acquireLastIndexCommit(false).getIndexCommit().getUserData().get(Engine.RETENTION_LEASES)); + assertThat(currentRetentionLeases, equalTo(toMap(replicaCommittedRetentionLeases))); + } + } + } + + private static Map toMap(final Collection replicaCommittedRetentionLeases) { + return replicaCommittedRetentionLeases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity())); + } + +} diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 58c4844a11bfc..36560dd96c627 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -663,6 +663,7 @@ public static final IndexShard newIndexShard( Collections.emptyList(), Arrays.asList(listeners), () -> {}, + (leases, listener) -> {}, cbs); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java index d0018a0a864f8..eff1edfed52ba 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.shard; import org.apache.lucene.index.SegmentInfos; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.ShardRoutingHelper; @@ -71,20 +72,21 @@ protected void tearDownThreadPool() { } - public void testAddOrUpdateRetentionLease() throws IOException { + public void testAddOrRenewRetentionLease() throws IOException { final IndexShard indexShard = newStartedShard(true); try { final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - indexShard.addOrUpdateRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); + indexShard.addRetentionLease( + Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); assertRetentionLeases(indexShard, i + 1, minimumRetainingSequenceNumbers, () -> 0L); } for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE); - indexShard.addOrUpdateRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); + indexShard.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); assertRetentionLeases(indexShard, length, minimumRetainingSequenceNumbers, () -> 0L); } } finally { @@ -103,7 +105,7 @@ public void testExpiration() throws 
IOException { try { final long[] retainingSequenceNumbers = new long[1]; retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - indexShard.addOrUpdateRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); { final Collection retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); @@ -116,7 +118,7 @@ public void testExpiration() throws IOException { // renew the lease currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(0, 1024)); retainingSequenceNumbers[0] = randomLongBetween(retainingSequenceNumbers[0], Long.MAX_VALUE); - indexShard.addOrUpdateRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + indexShard.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); { final Collection retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); @@ -150,7 +152,8 @@ public void testCommit() throws IOException { for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(randomNonNegativeLong())); - indexShard.addOrUpdateRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); + indexShard.addRetentionLease( + Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); } currentTimeMillis.set(TimeUnit.NANOSECONDS.toMillis(Long.MAX_VALUE)); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 769cdfc8a9b53..d240bb01fefb1 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -130,7 +130,7 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); - IndexShard shard = index.createShard(newRouting, s -> {}); + IndexShard shard = index.createShard(newRouting, s -> {}, (s, leases, listener) -> {}); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); final DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index b4c3d65115155..f248d46b11744 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.IndexEventListener; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardState; @@ -233,7 +234,8 @@ public 
MockIndexShard createShard( final PeerRecoveryTargetService.RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, - final Consumer globalCheckpointSyncer) throws IOException { + final Consumer globalCheckpointSyncer, + final RetentionLeaseSyncer retentionLeaseSyncer) throws IOException { failRandomly(); MockIndexService indexService = indexService(recoveryState.getShardId().getIndex()); MockIndexShard indexShard = indexService.createShard(shardRouting); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 9dd8d5c5b660d..b400b56b34d55 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -480,7 +480,8 @@ private IndicesClusterStateService createIndicesClusterStateService(DiscoveryNod null, null, primaryReplicaSyncer, - s -> {}); + s -> {}, + (s, leases, listener) -> {}); } private class RecordingIndicesService extends MockIndicesService { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java index 24f97b67c1458..8b750939238cb 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java @@ -82,6 +82,7 @@ import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; +import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -538,6 +539,15 @@ protected void assertSnapshotOrGenericThread() { actionFilters, indexNameExpressionResolver)), new GlobalCheckpointSyncAction( + settings, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + indexNameExpressionResolver), + new RetentionLeaseSyncAction( settings, transportService, clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 1e3dbef92c30a..35667b0f87a1c 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -642,7 +642,7 @@ public EngineConfig config( final CircuitBreakerService breakerService) { final IndexWriterConfig iwc = newIndexWriterConfig(); final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - final Engine.EventListener listener = new Engine.EventListener() {}; // we don't need to notify anybody in this test + final Engine.EventListener eventListener = new Engine.EventListener() {}; // we don't need to notify anybody in this test final List extRefreshListenerList = externalRefreshListener == null ? 
emptyList() : Collections.singletonList(externalRefreshListener); final List intRefreshListenerList = @@ -652,7 +652,13 @@ public EngineConfig config( if (maybeGlobalCheckpointSupplier == null) { assert maybeRetentionLeasesSupplier == null; final ReplicationTracker replicationTracker = new ReplicationTracker( - shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED, update -> {}, () -> 0L); + shardId, + allocationId.getId(), + indexSettings, + SequenceNumbers.NO_OPS_PERFORMED, + update -> {}, + () -> 0L, + (leases, listener) -> {}); globalCheckpointSupplier = replicationTracker; retentionLeasesSupplier = replicationTracker::getRetentionLeases; } else { @@ -671,7 +677,7 @@ public EngineConfig config( iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), - listener, + eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 10c9f399d4cbb..acb4911f9c69d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -385,6 +385,7 @@ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMe Collections.emptyList(), Arrays.asList(listeners), globalCheckpointSyncer, + (leases, listener) -> {}, breakerService); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index ca8318212c9ee..4deff6870144c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -25,6 +25,7 @@ public final class SystemPrivilege extends Privilege { "indices:admin/template/put", // needed for the TemplateUpgradeService "indices:admin/template/delete", // needed for the TemplateUpgradeService "indices:admin/seq_no/global_checkpoint_sync*", // needed for global checkpoint syncs + "indices:admin/seq_no/retention_lease_sync*", // needed for retention lease syncs "indices:admin/settings/update" // needed for DiskThresholdMonitor.markIndicesReadOnly ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 1484e7a878141..77be9f3b1b1f3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -127,6 +127,9 @@ public void testSystem() throws Exception { assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync"), is(true)); assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[p]"), is(true)); assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[r]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/retention_lease_sync"), is(true)); + 
assertThat(predicate.test("indices:admin/seq_no/retention_lease_sync[p]"), is(true)); + assertThat(predicate.test("indices:admin/seq_no/retention_lease_sync[r]"), is(true)); assertThat(predicate.test("indices:admin/settings/update"), is(true)); assertThat(predicate.test("indices:admin/settings/foo"), is(false)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 2a9d832f0f012..dccc8f3ce587a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -247,8 +247,15 @@ public void testActionsForSystemUserIsAuthorized() { // A failure would throw an exception final Authentication authentication = createAuthentication(SystemUser.INSTANCE); - final String[] actions = { "indices:monitor/whatever", "internal:whatever", "cluster:monitor/whatever", "cluster:admin/reroute", - "indices:admin/mapping/put", "indices:admin/template/put", "indices:admin/seq_no/global_checkpoint_sync", + final String[] actions = { + "indices:monitor/whatever", + "internal:whatever", + "cluster:monitor/whatever", + "cluster:admin/reroute", + "indices:admin/mapping/put", + "indices:admin/template/put", + "indices:admin/seq_no/global_checkpoint_sync", + "indices:admin/seq_no/retention_lease_sync", "indices:admin/settings/update" }; for (String action : actions) { authorize(authentication, action, request); From 3801925cf0f69f253a3e5b9af1d8889fca392313 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sun, 27 Jan 2019 08:13:47 -0500 Subject: [PATCH 14/57] Copy retention leases under lock When adding a retention lease, we make a reference copy of the retention leases under lock and then make a copy of that collection outside of the lock. However, since we merely copied a reference to the retention leases, after leaving a lock the underlying collection could change on us. Rather, we want to copy these under lock. This commit adds a dedicated method for doing this, asserts that we hold a lock when we use this method, and changes adding a retention lease to use this method. This commit was intended to be included with #37398 but was pushed to the wrong branch. --- .../elasticsearch/index/seqno/ReplicationTracker.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 85d5f6f62c61d..7e85602289205 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -165,6 +165,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final Map retentionLeases = new HashMap<>(); + private Collection copyRetentionLeases() { + assert Thread.holdsLock(this); + return Collections.unmodifiableCollection(new ArrayList<>(retentionLeases.values())); + } + /** * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. 
 *
@@ -208,9 +213,9 @@ public RetentionLease addRetentionLease(
             }
             retentionLease = new RetentionLease(id, retainingSequenceNumber, currentTimeMillisSupplier.getAsLong(), source);
             retentionLeases.put(id, retentionLease);
-            currentRetentionLeases = retentionLeases.values();
+            currentRetentionLeases = copyRetentionLeases();
         }
-        onNewRetentionLease.accept(Collections.unmodifiableCollection(new ArrayList<>(currentRetentionLeases)), listener);
+        onNewRetentionLease.accept(currentRetentionLeases, listener);
         return retentionLease;
     }

From f24dce11228cc58ddbbbda5409b0bef701f71b4d Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sun, 27 Jan 2019 08:16:14 -0500
Subject: [PATCH 15/57] Fix newlines in retention lease sync action tests

There is a method invocation here spanning multiple lines. This commit
breaks it up into one line per parameter, as this is friendlier to
future changes and diffs.
---
 .../index/seqno/RetentionLeaseSyncActionTests.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
index 898309524b4f0..0cd85ef60f21a 100644
--- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
+++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncActionTests.java
@@ -70,8 +70,13 @@ public void setUp() throws Exception {
         threadPool = new TestThreadPool(getClass().getName());
         transport = new CapturingTransport();
         clusterService = createClusterService(threadPool);
-        transportService = transport.createTransportService(clusterService.getSettings(), threadPool,
-            TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null, Collections.emptySet());
+        transportService = transport.createTransportService(
+            clusterService.getSettings(),
+            threadPool,
+            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
+            boundAddress -> clusterService.localNode(),
+            null,
+            Collections.emptySet());
         transportService.start();
         transportService.acceptIncomingRequests();
         shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool);

From 66ddd8d2f77d19334c36145e4bf4389de107cb1c Mon Sep 17 00:00:00 2001
From: Albert Zaharovits
Date: Sun, 27 Jan 2019 23:07:32 +0200
Subject: [PATCH 16/57] Create snapshot role (#35820)

This commit introduces the `create_snapshot` cluster privilege and the
`snapshot_user` role. This role is to be used by "cronable" tools that
call the snapshot API periodically, without resorting to the `manage`
cluster privilege. The `create_snapshot` cluster privilege is much more
limited than the `manage` privilege.

The `snapshot_user` role grants the privileges to view the metadata of
all indices (including restricted ones, i.e. .security). It also grants
the create snapshot privilege, but the repository has to be created
using another role. In addition, it grants the privileges to (only) GET
repositories and snapshots, but not to create or delete them.

The role does not allow creating repositories. This distinction is
important because snapshotting equates to the `read` index privilege if
the user has control of the snapshot destination, but that is not the
case here, because the role does not grant control over repository
configuration.
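To make the intended usage concrete, here is a minimal sketch of a scheduled job
taking a snapshot as a user that holds only the `snapshot_user` role. It is modelled
on the integration test added below; the username/password and the snapshot-naming
scheme are illustrative assumptions, and the repository `repo` must already exist,
since the role deliberately cannot create repositories:

```java
// Act as a user with only the snapshot_user role by attaching a basic-auth header.
final String authHeader = basicAuthHeaderValue("snapshot_user", new SecureString("password".toCharArray()));
final Client snapshotClient = client().filterWithHeader(Collections.singletonMap("Authorization", authHeader));

final CreateSnapshotResponse response = snapshotClient.admin().cluster()
        .prepareCreateSnapshot("repo", "snap-" + System.currentTimeMillis()) // e.g. one snapshot per cron run
        .setIndices("*")                     // may include restricted indices such as .security
        .setWaitForCompletion(true)
        .get();
assertThat(response.getSnapshotInfo().state(), is(SnapshotState.SUCCESS));
```

Restore, snapshot deletion, and repository administration all remain denied to this
role and must be performed by a user with broader privileges.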
--- .../SecurityDocumentationIT.java | 4 +- .../authz/privilege/ClusterPrivilege.java | 8 ++ .../authz/privilege/IndexPrivilege.java | 4 +- .../authz/store/ReservedRolesStore.java | 7 + .../authz/store/ReservedRolesStoreTests.java | 50 +++++++ .../integration/ClusterPrivilegeTests.java | 33 ++++- .../authz/SnapshotUserRoleIntegTests.java | 131 ++++++++++++++++++ 7 files changed, 229 insertions(+), 8 deletions(-) create mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index b7261b2dd9581..fa10de4fe4ce9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -638,8 +638,8 @@ public void testGetRoles() throws Exception { List roles = response.getRoles(); assertNotNull(response); - // 23 system roles plus the three we created - assertThat(roles.size(), equalTo(26)); + // 24 system roles plus the three we created + assertThat(roles.size(), equalTo(27)); } { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java index fba595e7a09e4..f3822dac4e4f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java @@ -6,6 +6,10 @@ package org.elasticsearch.xpack.core.security.authz.privilege; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; @@ -48,6 +52,8 @@ public final class ClusterPrivilege extends Privilege { private static final Automaton MANAGE_ROLLUP_AUTOMATON = patterns("cluster:admin/xpack/rollup/*", "cluster:monitor/xpack/rollup/*"); private static final Automaton MANAGE_CCR_AUTOMATON = patterns("cluster:admin/xpack/ccr/*", ClusterStateAction.NAME, HasPrivilegesAction.NAME); + private static final Automaton CREATE_SNAPSHOT_AUTOMATON = patterns(CreateSnapshotAction.NAME, SnapshotsStatusAction.NAME + "*", + GetSnapshotsAction.NAME, SnapshotsStatusAction.NAME, GetRepositoriesAction.NAME); private static final Automaton READ_CCR_AUTOMATON = patterns(ClusterStateAction.NAME, HasPrivilegesAction.NAME); private static final Automaton MANAGE_ILM_AUTOMATON = patterns("cluster:admin/ilm/*"); private static final Automaton READ_ILM_AUTOMATON = patterns(GetLifecycleAction.NAME, GetStatusAction.NAME); @@ -73,6 +79,7 @@ public final class ClusterPrivilege extends Privilege { public static final ClusterPrivilege MANAGE_PIPELINE = new ClusterPrivilege("manage_pipeline", 
"cluster:admin/ingest/pipeline/*"); public static final ClusterPrivilege MANAGE_CCR = new ClusterPrivilege("manage_ccr", MANAGE_CCR_AUTOMATON); public static final ClusterPrivilege READ_CCR = new ClusterPrivilege("read_ccr", READ_CCR_AUTOMATON); + public static final ClusterPrivilege CREATE_SNAPSHOT = new ClusterPrivilege("create_snapshot", CREATE_SNAPSHOT_AUTOMATON); public static final ClusterPrivilege MANAGE_ILM = new ClusterPrivilege("manage_ilm", MANAGE_ILM_AUTOMATON); public static final ClusterPrivilege READ_ILM = new ClusterPrivilege("read_ilm", READ_ILM_AUTOMATON); @@ -98,6 +105,7 @@ public final class ClusterPrivilege extends Privilege { .put("manage_rollup", MANAGE_ROLLUP) .put("manage_ccr", MANAGE_CCR) .put("read_ccr", READ_CCR) + .put("create_snapshot", CREATE_SNAPSHOT) .put("manage_ilm", MANAGE_ILM) .put("read_ilm", READ_ILM) .immutableMap(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 3a92c08704e41..d24863d6d53c4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -64,8 +64,8 @@ public final class IndexPrivilege extends Privilege { CloseIndexAction.NAME + "*"); private static final Automaton MANAGE_ILM_AUTOMATON = patterns("indices:admin/ilm/*"); - public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY); - public static final IndexPrivilege ALL = new IndexPrivilege("all", ALL_AUTOMATON); + public static final IndexPrivilege NONE = new IndexPrivilege("none", Automatons.EMPTY); + public static final IndexPrivilege ALL = new IndexPrivilege("all", ALL_AUTOMATON); public static final IndexPrivilege READ = new IndexPrivilege("read", READ_AUTOMATON); public static final IndexPrivilege READ_CROSS_CLUSTER = new IndexPrivilege("read_cross_cluster", READ_CROSS_CLUSTER_AUTOMATON); public static final IndexPrivilege CREATE = new IndexPrivilege("create", CREATE_AUTOMATON); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index 8cb151da4e2b1..2c30b5fe1affe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authz.store; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.xpack.core.monitoring.action.MonitoringBulkAction; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -179,6 +180,12 @@ private static Map initializeReservedRoles() { RoleDescriptor.IndicesPrivileges.builder() .indices(".code-*").privileges("read").build() }, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) + .put("snapshot_user", new RoleDescriptor("snapshot_user", new String[] { "create_snapshot", GetRepositoriesAction.NAME }, + new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() + .indices("*") + 
.privileges("view_index_metadata") + .allowRestrictedIndices(true) + .build() }, null, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA, null)) .immutableMap(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index dc077a17e5c59..e6e1dd1d06825 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -7,8 +7,13 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsAction; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction; import org.elasticsearch.action.admin.indices.create.CreateIndexAction; @@ -173,9 +178,54 @@ public void testIsReserved() { assertThat(ReservedRolesStore.isReserved(APMSystemUser.ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.COLLECTION_ROLE_NAME), is(true)); assertThat(ReservedRolesStore.isReserved(RemoteMonitoringUser.INDEXING_ROLE_NAME), is(true)); + assertThat(ReservedRolesStore.isReserved("snapshot_user"), is(true)); assertThat(ReservedRolesStore.isReserved("code_admin"), is(true)); assertThat(ReservedRolesStore.isReserved("code_user"), is(true)); + } + + public void testSnapshotUserRole() { + final TransportRequest request = mock(TransportRequest.class); + + RoleDescriptor roleDescriptor = new ReservedRolesStore().roleDescriptor("snapshot_user"); + assertNotNull(roleDescriptor); + assertThat(roleDescriptor.getMetadata(), hasEntry("_reserved", true)); + + Role snapshotUserRole = Role.builder(roleDescriptor, null).build(); + assertThat(snapshotUserRole.cluster().check(GetRepositoriesAction.NAME, request), is(true)); + assertThat(snapshotUserRole.cluster().check(CreateSnapshotAction.NAME, request), is(true)); + assertThat(snapshotUserRole.cluster().check(SnapshotsStatusAction.NAME, request), is(true)); + assertThat(snapshotUserRole.cluster().check(GetSnapshotsAction.NAME, request), is(true)); + + assertThat(snapshotUserRole.cluster().check(PutRepositoryAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(GetIndexTemplatesAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(DeleteIndexTemplateAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(PutPipelineAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(GetPipelineAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(DeletePipelineAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(ClusterRerouteAction.NAME, request), 
is(false)); + assertThat(snapshotUserRole.cluster().check(ClusterUpdateSettingsAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(MonitoringBulkAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(GetWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(PutWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(DeleteWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(ExecuteWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(AckWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(ActivateWatchAction.NAME, request), is(false)); + assertThat(snapshotUserRole.cluster().check(WatcherServiceAction.NAME, request), is(false)); + + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); + assertThat(snapshotUserRole.indices().allowedIndicesMatcher("indices:foo").test(randomAlphaOfLengthBetween(8, 24)), is(false)); + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetAction.NAME).test(randomAlphaOfLengthBetween(8, 24)), is(false)); + + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) + .test(randomAlphaOfLengthBetween(8, 24)), is(true)); + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) + .test(RestrictedIndicesNames.INTERNAL_SECURITY_INDEX), is(true)); + assertThat(snapshotUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME) + .test(RestrictedIndicesNames.SECURITY_INDEX_NAME), is(true)); + assertNoAccessAllowed(snapshotUserRole, RestrictedIndicesNames.NAMES_SET); } public void testIngestAdminRole() { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index bf81fd77dc59d..3b30982784b76 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -32,12 +32,15 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase { "role_c:\n" + " indices:\n" + " - names: 'someindex'\n" + - " privileges: [ all ]\n"; + " privileges: [ all ]\n" + + "role_d:\n" + + " cluster: [ create_snapshot ]\n"; private static final String USERS_ROLES = "role_a:user_a\n" + "role_b:user_b\n" + - "role_c:user_c\n"; + "role_c:user_c\n" + + "role_d:user_d\n"; private static Path repositoryLocation; @@ -75,8 +78,8 @@ protected String configUsers() { return super.configUsers() + "user_a:" + usersPasswdHashed + "\n" + "user_b:" + usersPasswdHashed + "\n" + - "user_c:" + usersPasswdHashed + "\n"; - + "user_c:" + usersPasswdHashed + "\n" + + "user_d:" + usersPasswdHashed + "\n"; } @Override @@ -122,6 +125,18 @@ public void testThatClusterPrivilegesWorkAsExpectedViaHttp() throws Exception { assertAccessIsDenied("user_c", "GET", "/_nodes/infos"); assertAccessIsDenied("user_c", "POST", "/_cluster/reroute"); assertAccessIsDenied("user_c", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); + + // user_d can view repos and create and view snapshots on existings repos, everything 
else is DENIED + assertAccessIsDenied("user_d", "GET", "/_cluster/state"); + assertAccessIsDenied("user_d", "GET", "/_cluster/health"); + assertAccessIsDenied("user_d", "GET", "/_cluster/settings"); + assertAccessIsDenied("user_d", "GET", "/_cluster/stats"); + assertAccessIsDenied("user_d", "GET", "/_cluster/pending_tasks"); + assertAccessIsDenied("user_d", "GET", "/_nodes/stats"); + assertAccessIsDenied("user_d", "GET", "/_nodes/hot_threads"); + assertAccessIsDenied("user_d", "GET", "/_nodes/infos"); + assertAccessIsDenied("user_d", "POST", "/_cluster/reroute"); + assertAccessIsDenied("user_d", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); } public void testThatSnapshotAndRestore() throws Exception { @@ -129,6 +144,7 @@ public void testThatSnapshotAndRestore() throws Exception { repositoryLocation.toString()).endObject().endObject()); assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo", repoJson); assertAccessIsDenied("user_c", "PUT", "/_snapshot/my-repo", repoJson); + assertAccessIsDenied("user_d", "PUT", "/_snapshot/my-repo", repoJson); assertAccessIsAllowed("user_a", "PUT", "/_snapshot/my-repo", repoJson); Request createBar = new Request("PUT", "/someindex/bar/1"); @@ -136,6 +152,7 @@ public void testThatSnapshotAndRestore() throws Exception { createBar.addParameter("refresh", "true"); assertAccessIsDenied("user_a", createBar); assertAccessIsDenied("user_b", createBar); + assertAccessIsDenied("user_d", createBar); assertAccessIsAllowed("user_c", createBar); assertAccessIsDenied("user_b", "PUT", "/_snapshot/my-repo/my-snapshot", "{ \"indices\": \"someindex\" }"); @@ -145,30 +162,38 @@ public void testThatSnapshotAndRestore() throws Exception { assertAccessIsDenied("user_b", "GET", "/_snapshot/my-repo/my-snapshot/_status"); assertAccessIsDenied("user_c", "GET", "/_snapshot/my-repo/my-snapshot/_status"); assertAccessIsAllowed("user_a", "GET", "/_snapshot/my-repo/my-snapshot/_status"); + assertAccessIsAllowed("user_d", "GET", "/_snapshot/my-repo/my-snapshot/_status"); // This snapshot needs to be finished in order to be restored waitForSnapshotToFinish("my-repo", "my-snapshot"); + // user_d can create snapshots, but not concurrently + assertAccessIsAllowed("user_d", "PUT", "/_snapshot/my-repo/my-snapshot-d", "{ \"indices\": \"someindex\" }"); assertAccessIsDenied("user_a", "DELETE", "/someindex"); assertAccessIsDenied("user_b", "DELETE", "/someindex"); + assertAccessIsDenied("user_d", "DELETE", "/someindex"); assertAccessIsAllowed("user_c", "DELETE", "/someindex"); Request restoreSnapshotRequest = new Request("POST", "/_snapshot/my-repo/my-snapshot/_restore"); restoreSnapshotRequest.addParameter("wait_for_completion", "true"); assertAccessIsDenied("user_b", restoreSnapshotRequest); assertAccessIsDenied("user_c", restoreSnapshotRequest); + assertAccessIsDenied("user_d", restoreSnapshotRequest); assertAccessIsAllowed("user_a", restoreSnapshotRequest); assertAccessIsDenied("user_a", "GET", "/someindex/bar/1"); assertAccessIsDenied("user_b", "GET", "/someindex/bar/1"); + assertAccessIsDenied("user_d", "GET", "/someindex/bar/1"); assertAccessIsAllowed("user_c", "GET", "/someindex/bar/1"); assertAccessIsDenied("user_b", "DELETE", "/_snapshot/my-repo/my-snapshot"); assertAccessIsDenied("user_c", "DELETE", "/_snapshot/my-repo/my-snapshot"); + assertAccessIsDenied("user_d", "DELETE", "/_snapshot/my-repo/my-snapshot"); assertAccessIsAllowed("user_a", "DELETE", "/_snapshot/my-repo/my-snapshot"); assertAccessIsDenied("user_b", "DELETE", 
"/_snapshot/my-repo"); assertAccessIsDenied("user_c", "DELETE", "/_snapshot/my-repo"); + assertAccessIsDenied("user_d", "DELETE", "/_snapshot/my-repo"); assertAccessIsAllowed("user_a", "DELETE", "/_snapshot/my-repo"); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java new file mode 100644 index 0000000000000..9a6909aad26fd --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -0,0 +1,131 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.security.authz; + +import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.indices.get.GetIndexResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.NativeRealmIntegTestCase; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Locale; + +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.INTERNAL_SECURITY_INDEX; +import static org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames.SECURITY_INDEX_NAME; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; + +public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase { + + private Client client; + private String ordinaryIndex; + + @Before + public void setupClusterBeforeSnapshot() { + logger.info("--> creating repository"); + assertAcked(client().admin().cluster().preparePutRepository("repo") + .setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath()))); + + logger.info("--> creating ordinary index"); + final int shards = between(1, 10); + ordinaryIndex = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); + assertAcked(prepareCreate(ordinaryIndex, 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0))); + ensureGreen(); + + logger.info("--> creating snapshot_user user"); + final String user = "snapshot_user"; + final char[] password = new char[] {'p', 'a', 's', 's', 'w', 'o', 'r', 'd'}; + final String snapshotUserToken = basicAuthHeaderValue(user, new SecureString(password)); + client = client().filterWithHeader(Collections.singletonMap("Authorization", snapshotUserToken)); + securityClient().preparePutUser(user, password, Hasher.BCRYPT, "snapshot_user").get(); + ensureGreen(INTERNAL_SECURITY_INDEX); + 
} + + public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { + // view repositories + final GetRepositoriesResponse getRepositoriesResponse = client.admin().cluster().prepareGetRepositories(randomFrom("*", "_all")) + .get(); + assertThat(getRepositoriesResponse.repositories().size(), is(1)); + assertThat(getRepositoriesResponse.repositories().get(0).name(), is("repo")); + // view all indices, including restricted ones + final GetIndexResponse getIndexResponse = client.admin().indices().prepareGetIndex().setIndices(randomFrom("_all", "*")).get(); + assertThat(Arrays.asList(getIndexResponse.indices()), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + // create snapshot that includes restricted indices + final CreateSnapshotResponse snapshotResponse = client.admin().cluster().prepareCreateSnapshot("repo", "snap") + .setIndices(randomFrom("_all", "*")).setWaitForCompletion(true).get(); + assertThat(snapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); + assertThat(snapshotResponse.getSnapshotInfo().indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + // view snapshots for repo + final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots("repo").get(); + assertThat(getSnapshotResponse.getSnapshots().size(), is(1)); + assertThat(getSnapshotResponse.getSnapshots().get(0).snapshotId().getName(), is("snap")); + assertThat(getSnapshotResponse.getSnapshots().get(0).indices(), containsInAnyOrder(INTERNAL_SECURITY_INDEX, ordinaryIndex)); + } + + public void testSnapshotUserRoleIsReserved() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> securityClient().preparePutRole("snapshot_user").get()); + assertThat(e.getMessage(), containsString("role [snapshot_user] is reserved and cannot be modified")); + e = expectThrows(IllegalArgumentException.class, + () -> securityClient().prepareDeleteRole("snapshot_user").get()); + assertThat(e.getMessage(), containsString("role [snapshot_user] is reserved and cannot be deleted")); + } + + public void testSnapshotUserRoleUnathorizedForDestructiveActions() { + // try search all + assertThrowsAuthorizationException(() -> client.prepareSearch(randomFrom("_all", "*")).get(), "indices:data/read/search", + "snapshot_user"); + // try create index + assertThrowsAuthorizationException(() -> client.admin().indices().prepareCreate(ordinaryIndex + "2").get(), "indices:admin/create", + "snapshot_user"); + // try create another repo + assertThrowsAuthorizationException( + () -> client.admin().cluster().preparePutRepository("some_other_repo").setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath())).get(), + "cluster:admin/repository/put", "snapshot_user"); + // try delete repo + assertThrowsAuthorizationException(() -> client.admin().cluster().prepareDeleteRepository("repo").get(), + "cluster:admin/repository/delete", "snapshot_user"); + // try fumble with snapshots + assertThrowsAuthorizationException( + () -> client.admin().cluster().prepareRestoreSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + "cluster:admin/snapshot/restore", "snapshot_user"); + assertThrowsAuthorizationException( + () -> client.admin().cluster().prepareDeleteSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + "cluster:admin/snapshot/delete", "snapshot_user"); + // try destructive/revealing actions on all indices + for (final String indexToTest : Arrays.asList(INTERNAL_SECURITY_INDEX, SECURITY_INDEX_NAME, 
ordinaryIndex)) { + assertThrowsAuthorizationException(() -> client.prepareSearch(indexToTest).get(), "indices:data/read/search", "snapshot_user"); + assertThrowsAuthorizationException(() -> client.prepareGet(indexToTest, "doc", "1").get(), "indices:data/read/get", + "snapshot_user"); + assertThrowsAuthorizationException(() -> client.prepareIndex(indexToTest, "doc").setSource("term", "val").get(), + "indices:data/write/index", "snapshot_user"); + assertThrowsAuthorizationException(() -> client.prepareUpdate(indexToTest, "doc", "1").setDoc("term", "val").get(), + "indices:data/write/update", "snapshot_user"); + assertThrowsAuthorizationException(() -> client.prepareDelete(indexToTest, "doc", "1").get(), "indices:data/write/delete", + "snapshot_user"); + + assertThrowsAuthorizationException(() -> client.admin().indices().prepareDelete(indexToTest).get(), "indices:admin/delete", + "snapshot_user"); + } + } + +} From b1735aa93b557f173abf4c2337917cabdfcd30e0 Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Sun, 27 Jan 2019 16:02:22 -0800 Subject: [PATCH 17/57] Support both typed and typeless 'get mapping' requests in the HLRC. (#37796) From previous PRs, we've already added support for include_type_name to the get mapping API. We had also taken an approach to the HLRC where the server-side `GetMappingResponse#fromXContent` could only handle typeless input. This PR updates the HLRC for 'get mapping' to be in line with our new approach: * Add a typeless 'get mappings' method to the Java HLRC, that accepts new client-side request and response objects. This new response only handles typeless mapping definitions. * Switch the old version of `GetMappingResponse` back to expecting typed mappings, and deprecate the corresponding method on the HLRC. Finally, the PR also does some small, related clean-up around 'get field mappings'. 
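As a rough usage sketch of the new typeless flavour (the index name, the field, and
an already-built `RestHighLevelClient` named `client` are assumptions for
illustration, not part of the patch):

```java
// Typeless request/response classes live in org.elasticsearch.client.indices.
GetMappingsRequest request = new GetMappingsRequest().indices("my-index");
GetMappingsResponse response = client.indices().getMapping(request, RequestOptions.DEFAULT);

// Mappings are keyed by index name only; there is no intervening type level.
MappingMetaData indexMapping = response.mappings().get("my-index");
Map<String, Object> source = indexMapping.sourceAsMap(); // e.g. {properties={field={type=text}}}
```

The deprecated overloads keep accepting the old
`org.elasticsearch.action.admin.indices.mapping.get` request class and return the
typed, two-level response.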
--- .../elasticsearch/client/IndicesClient.java | 78 ++++++++++-- .../client/IndicesRequestConverters.java | 21 ++- .../client/indices/GetMappingsRequest.java | 74 +++++++++++ .../client/indices/GetMappingsResponse.java | 71 +++++++++++ .../elasticsearch/client/IndicesClientIT.java | 54 +++++++- .../client/IndicesRequestConvertersTests.java | 80 ++++++++---- .../IndicesClientDocumentationIT.java | 19 ++- .../GetFieldMappingsResponseTests.java | 7 +- .../indices/GetMappingsResponseTests.java | 120 ++++++++++++++++++ .../high-level/indices/get_mappings.asciidoc | 7 +- .../mapping/get/GetMappingsResponse.java | 21 +-- .../admin/indices/RestGetMappingAction.java | 4 +- .../get/GetFieldMappingsResponseTests.java | 4 +- .../mapping/get/GetMappingsResponseTests.java | 70 ++-------- 14 files changed, 494 insertions(+), 136 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsResponse.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetMappingsResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index 24c0175b7e884..c93c9c67e8f7f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -37,8 +37,6 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetFieldMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -61,6 +59,8 @@ import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetMappingsRequest; +import org.elasticsearch.client.indices.GetMappingsResponse; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; @@ -261,8 +261,11 @@ public void putMappingAsync(org.elasticsearch.action.admin.indices.mapping.put.P * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetMappingsResponse getMapping(GetMappingsRequest getMappingsRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, - GetMappingsResponse::fromXContent, emptySet()); + return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, + IndicesRequestConverters::getMappings, + options, + GetMappingsResponse::fromXContent, + emptySet()); } /** @@ -275,8 +278,60 @@ public GetMappingsResponse getMapping(GetMappingsRequest getMappingsRequest, Req */ public void getMappingAsync(GetMappingsRequest getMappingsRequest, 
RequestOptions options, ActionListener listener) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, IndicesRequestConverters::getMappings, options, - GetMappingsResponse::fromXContent, listener, emptySet()); + restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, + IndicesRequestConverters::getMappings, + options, + GetMappingsResponse::fromXContent, + listener, + emptySet()); + } + + /** + * Retrieves the mappings on an index or indices using the Get Mapping API. + * See + * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + * + * @deprecated This method uses old request and response objects which still refer to types, a deprecated + * feature. The method {@link #getMapping(GetMappingsRequest, RequestOptions)} should be used instead, which + * accepts a new request object. + */ + @Deprecated + public org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse getMapping( + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingsRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, + IndicesRequestConverters::getMappings, + options, + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse::fromXContent, + emptySet()); + } + + /** + * Asynchronously retrieves the mappings on an index on indices using the Get Mapping API. + * See + * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * + * @deprecated This method uses old request and response objects which still refer to types, a deprecated feature. + * The method {@link #getMapping(GetMappingsRequest, RequestOptions)} should be used instead, which accepts a new + * request object. + */ + @Deprecated + public void getMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingsRequest, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, + IndicesRequestConverters::getMappings, + options, + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse::fromXContent, + listener, + emptySet()); } /** @@ -288,8 +343,9 @@ public void getMappingAsync(GetMappingsRequest getMappingsRequest, RequestOption * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response * - * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The method - * {@link #getFieldMapping(GetFieldMappingsRequest, RequestOptions)} should be used instead, which accepts a new request object. + * @deprecated This method uses old request and response objects which still refer to types, a deprecated feature. + * The method {@link #getFieldMapping(GetFieldMappingsRequest, RequestOptions)} should be used instead, which + * accepts a new request object. 
*/ @Deprecated public org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse getFieldMapping( @@ -307,9 +363,9 @@ public org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRespon * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion * - * @deprecated This method uses an old request object which still refers to types, a deprecated feature. The - * method {@link #getFieldMappingAsync(GetFieldMappingsRequest, RequestOptions, ActionListener)} should be used instead, - * which accepts a new request object. + * @deprecated This method uses old request and response objects which still refer to types, a deprecated feature. + * The method {@link #getFieldMappingAsync(GetFieldMappingsRequest, RequestOptions, ActionListener)} should be + * used instead, which accepts a new request object. */ @Deprecated public void getFieldMappingAsync(org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 2b44e3006b1be..4889ead93b715 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -35,7 +35,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.client.indices.GetFieldMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -49,6 +48,7 @@ import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; @@ -148,6 +148,7 @@ static Request putMapping(PutMappingRequest putMappingRequest) throws IOExceptio return request; } + @Deprecated static Request putMapping(org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest putMappingRequest) throws IOException { // The concreteIndex is an internal concept, not applicable to requests made over the REST API. if (putMappingRequest.getConcreteIndex() != null) { @@ -166,7 +167,21 @@ static Request putMapping(org.elasticsearch.action.admin.indices.mapping.put.Put return request; } - static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOException { + static Request getMappings(GetMappingsRequest getMappingsRequest) { + String[] indices = getMappingsRequest.indices() == null ? 
Strings.EMPTY_ARRAY : getMappingsRequest.indices(); + + Request request = new Request(HttpGet.METHOD_NAME, RequestConverters.endpoint(indices, "_mapping")); + + RequestConverters.Params parameters = new RequestConverters.Params(request); + parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); + parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); + parameters.withLocal(getMappingsRequest.local()); + + return request; + } + + @Deprecated + static Request getMappings(org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingsRequest) { String[] indices = getMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.indices(); String[] types = getMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getMappingsRequest.types(); @@ -176,6 +191,7 @@ static Request getMappings(GetMappingsRequest getMappingsRequest) throws IOExcep parameters.withMasterTimeout(getMappingsRequest.masterNodeTimeout()); parameters.withIndicesOptions(getMappingsRequest.indicesOptions()); parameters.withLocal(getMappingsRequest.local()); + parameters.putParam(INCLUDE_TYPE_NAME_PARAMETER, "true"); return request; } @@ -201,6 +217,7 @@ static Request getFieldMapping(GetFieldMappingsRequest getFieldMappingsRequest) return request; } + @Deprecated static Request getFieldMapping(org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest getFieldMappingsRequest) { String[] indices = getFieldMappingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.indices(); String[] types = getFieldMappingsRequest.types() == null ? Strings.EMPTY_ARRAY : getFieldMappingsRequest.types(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsRequest.java new file mode 100644 index 0000000000000..0bc4ba4af77c1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsRequest.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +public class GetMappingsRequest extends TimedRequest { + + private boolean local = false; + private boolean includeDefaults = false; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + /** + * Indicates whether the receiving node should operate based on local index information or + * forward requests, where needed, to other nodes. 
If running locally, request will not + * raise errors if local index information is missing. + */ + public GetMappingsRequest local(boolean local) { + this.local = local; + return this; + } + + public boolean local() { + return local; + } + + public GetMappingsRequest indices(String... indices) { + this.indices = indices; + return this; + } + + public GetMappingsRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + public String[] indices() { + return indices; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public boolean includeDefaults() { + return includeDefaults; + } + + /** Indicates whether default mapping settings should be returned */ + public GetMappingsRequest includeDefaults(boolean includeDefaults) { + this.includeDefaults = includeDefaults; + return this; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsResponse.java new file mode 100644 index 0000000000000..54f569ab94b06 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indices/GetMappingsResponse.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.index.mapper.MapperService; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +public class GetMappingsResponse { + + static final ParseField MAPPINGS = new ParseField("mappings"); + + private Map mappings; + + public GetMappingsResponse(Map mappings) { + this.mappings = mappings; + } + + public Map mappings() { + return mappings; + } + + public static GetMappingsResponse fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { + parser.nextToken(); + } + + XContentParserUtils.ensureExpectedToken(parser.currentToken(), + XContentParser.Token.START_OBJECT, + parser::getTokenLocation); + + Map parts = parser.map(); + + Map mappings = new HashMap<>(); + for (Map.Entry entry : parts.entrySet()) { + String indexName = entry.getKey(); + assert entry.getValue() instanceof Map : "expected a map as type mapping, but got: " + entry.getValue().getClass(); + + @SuppressWarnings("unchecked") + final Map fieldMappings = (Map) ((Map) entry.getValue()) + .get(MAPPINGS.getPreferredName()); + + mappings.put(indexName, new MappingMetaData(MapperService.SINGLE_MAPPING_NAME, fieldMappings)); + } + + return new GetMappingsResponse(mappings); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index fe175b217bd5e..708cb6687ce9e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -41,8 +41,6 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -72,6 +70,8 @@ import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetFieldMappingsResponse; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetMappingsRequest; +import org.elasticsearch.client.indices.GetMappingsResponse; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; @@ -95,6 +95,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.admin.indices.RestCreateIndexAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; +import org.elasticsearch.rest.action.admin.indices.RestGetMappingAction; import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import java.io.IOException; @@ -527,8 +528,9 @@ public void testGetMapping() throws IOException { 
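         // the mapping under test is { "properties": { "field": { "type": "text" } } },
         // built by the mappingBuilder start/endObject calls around these lines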
mappingBuilder.endObject().endObject().endObject(); putMappingRequest.source(mappingBuilder); - AcknowledgedResponse putMappingResponse = - execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + AcknowledgedResponse putMappingResponse = execute(putMappingRequest, + highLevelClient().indices()::putMapping, + highLevelClient().indices()::putMappingAsync); assertTrue(putMappingResponse.isAcknowledged()); Map getIndexResponse = getAsMap(indexName); @@ -536,8 +538,48 @@ public void testGetMapping() throws IOException { GetMappingsRequest request = new GetMappingsRequest().indices(indexName); - GetMappingsResponse getMappingsResponse = - execute(request, highLevelClient().indices()::getMapping, highLevelClient().indices()::getMappingAsync); + GetMappingsResponse getMappingsResponse = execute( + request, + highLevelClient().indices()::getMapping, + highLevelClient().indices()::getMappingAsync); + + Map mappings = getMappingsResponse.mappings().get(indexName).sourceAsMap(); + Map type = new HashMap<>(); + type.put("type", "text"); + Map field = new HashMap<>(); + field.put("field", type); + Map expected = new HashMap<>(); + expected.put("properties", field); + assertThat(mappings, equalTo(expected)); + } + + public void testGetMappingWithTypes() throws IOException { + String indexName = "test"; + createIndex(indexName, Settings.EMPTY); + + PutMappingRequest putMappingRequest = new PutMappingRequest(indexName); + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject().startObject("properties").startObject("field"); + mappingBuilder.field("type", "text"); + mappingBuilder.endObject().endObject().endObject(); + putMappingRequest.source(mappingBuilder); + + AcknowledgedResponse putMappingResponse = execute(putMappingRequest, + highLevelClient().indices()::putMapping, + highLevelClient().indices()::putMappingAsync); + assertTrue(putMappingResponse.isAcknowledged()); + + Map getIndexResponse = getAsMap(indexName); + assertEquals("text", XContentMapValues.extractValue(indexName + ".mappings.properties.field.type", getIndexResponse)); + + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest request = + new org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest().indices(indexName); + + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse getMappingsResponse = execute( + request, + highLevelClient().indices()::getMapping, + highLevelClient().indices()::getMappingAsync, + expectWarnings(RestGetMappingAction.TYPES_DEPRECATION_MESSAGE)); Map mappings = getMappingsResponse.getMappings().get(indexName).get("_doc").sourceAsMap(); Map type = new HashMap<>(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index 5e08381720ec7..409690eaa6997 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import 
org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; @@ -50,11 +49,12 @@ import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.client.indices.CreateIndexRequest; -import org.elasticsearch.client.indices.RandomCreateIndexGenerator; +import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetMappingsRequest; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; -import org.elasticsearch.client.indices.GetFieldMappingsRequest; +import org.elasticsearch.client.indices.RandomCreateIndexGenerator; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -231,22 +231,53 @@ public void testPutMappingWithTypes() throws IOException { RequestConvertersTests.assertToXContentBody(putMappingRequest, request.getEntity()); } - public void testGetMapping() throws IOException { + public void testGetMapping() { GetMappingsRequest getMappingRequest = new GetMappingsRequest(); String[] indices = Strings.EMPTY_ARRAY; - if (ESTestCase.randomBoolean()) { + if (randomBoolean()) { indices = RequestConvertersTests.randomIndicesNames(0, 5); getMappingRequest.indices(indices); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { + getMappingRequest.indices((String[]) null); + } + + Map expectedParams = new HashMap<>(); + RequestConvertersTests.setRandomIndicesOptions(getMappingRequest::indicesOptions, + getMappingRequest::indicesOptions, expectedParams); + RequestConvertersTests.setRandomMasterTimeout(getMappingRequest, expectedParams); + RequestConvertersTests.setRandomLocal(getMappingRequest::local, expectedParams); + + Request request = IndicesRequestConverters.getMappings(getMappingRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_mapping"); + + Assert.assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + Assert.assertThat(expectedParams, equalTo(request.getParameters())); + Assert.assertThat(HttpGet.METHOD_NAME, equalTo(request.getMethod())); + } + + public void testGetMappingWithTypes() { + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest getMappingRequest = + new org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest(); + + String[] indices = Strings.EMPTY_ARRAY; + if (randomBoolean()) { + indices = RequestConvertersTests.randomIndicesNames(0, 5); + getMappingRequest.indices(indices); + } else if (randomBoolean()) { getMappingRequest.indices((String[]) null); } String type = null; - if (ESTestCase.randomBoolean()) { - type = ESTestCase.randomAlphaOfLengthBetween(3, 10); + if (randomBoolean()) { + type = randomAlphaOfLengthBetween(3, 10); getMappingRequest.types(type); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getMappingRequest.types((String[]) null); } @@ -256,6 +287,7 @@ public void testGetMapping() throws IOException { getMappingRequest::indicesOptions, expectedParams); 
RequestConvertersTests.setRandomMasterTimeout(getMappingRequest, expectedParams); RequestConvertersTests.setRandomLocal(getMappingRequest, expectedParams); + expectedParams.put(INCLUDE_TYPE_NAME_PARAMETER, "true"); Request request = IndicesRequestConverters.getMappings(getMappingRequest); StringJoiner endpoint = new StringJoiner("/", "/", ""); @@ -277,21 +309,21 @@ public void testGetFieldMapping() { GetFieldMappingsRequest getFieldMappingsRequest = new GetFieldMappingsRequest(); String[] indices = Strings.EMPTY_ARRAY; - if (ESTestCase.randomBoolean()) { + if (randomBoolean()) { indices = RequestConvertersTests.randomIndicesNames(0, 5); getFieldMappingsRequest.indices(indices); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getFieldMappingsRequest.indices((String[]) null); } String[] fields = null; - if (ESTestCase.randomBoolean()) { - fields = new String[ESTestCase.randomIntBetween(1, 5)]; + if (randomBoolean()) { + fields = new String[randomIntBetween(1, 5)]; for (int i = 0; i < fields.length; i++) { - fields[i] = ESTestCase.randomAlphaOfLengthBetween(3, 10); + fields[i] = randomAlphaOfLengthBetween(3, 10); } getFieldMappingsRequest.fields(fields); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getFieldMappingsRequest.fields((String[]) null); } @@ -321,29 +353,29 @@ public void testGetFieldMappingWithTypes() { new org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsRequest(); String[] indices = Strings.EMPTY_ARRAY; - if (ESTestCase.randomBoolean()) { + if (randomBoolean()) { indices = RequestConvertersTests.randomIndicesNames(0, 5); getFieldMappingsRequest.indices(indices); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getFieldMappingsRequest.indices((String[]) null); } String type = null; - if (ESTestCase.randomBoolean()) { - type = ESTestCase.randomAlphaOfLengthBetween(3, 10); + if (randomBoolean()) { + type = randomAlphaOfLengthBetween(3, 10); getFieldMappingsRequest.types(type); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getFieldMappingsRequest.types((String[]) null); } String[] fields = null; - if (ESTestCase.randomBoolean()) { - fields = new String[ESTestCase.randomIntBetween(1, 5)]; + if (randomBoolean()) { + fields = new String[randomIntBetween(1, 5)]; for (int i = 0; i < fields.length; i++) { - fields[i] = ESTestCase.randomAlphaOfLengthBetween(3, 10); + fields[i] = randomAlphaOfLengthBetween(3, 10); } getFieldMappingsRequest.fields(fields); - } else if (ESTestCase.randomBoolean()) { + } else if (randomBoolean()) { getFieldMappingsRequest.fields((String[]) null); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 0951d05b0ab54..cfd5ac4de2f82 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -42,8 +42,6 @@ import org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.client.indices.GetFieldMappingsRequest; import org.elasticsearch.client.indices.GetFieldMappingsResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import 
org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -76,6 +74,8 @@ import org.elasticsearch.client.indices.CreateIndexResponse; import org.elasticsearch.client.indices.FreezeIndexRequest; import org.elasticsearch.client.indices.GetIndexTemplatesRequest; +import org.elasticsearch.client.indices.GetMappingsRequest; +import org.elasticsearch.client.indices.GetMappingsResponse; import org.elasticsearch.client.indices.IndexTemplatesExistRequest; import org.elasticsearch.client.indices.PutMappingRequest; import org.elasticsearch.client.indices.UnfreezeIndexRequest; @@ -591,8 +591,7 @@ public void testGetMapping() throws IOException { // end::get-mappings-request // tag::get-mappings-request-masterTimeout - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.masterNodeTimeout("1m"); // <2> + request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1> // end::get-mappings-request-masterTimeout // tag::get-mappings-request-indicesOptions @@ -604,9 +603,9 @@ public void testGetMapping() throws IOException { // end::get-mappings-execute // tag::get-mappings-response - ImmutableOpenMap> allMappings = getMappingResponse.mappings(); // <1> - MappingMetaData typeMapping = allMappings.get("twitter").get("_doc"); // <2> - Map mapping = typeMapping.sourceAsMap(); // <3> + Map allMappings = getMappingResponse.mappings(); // <1> + MappingMetaData indexMapping = allMappings.get("twitter"); // <2> + Map mapping = indexMapping.sourceAsMap(); // <3> // end::get-mappings-response Map type = new HashMap<>(); @@ -662,9 +661,9 @@ public void onFailure(Exception e) { final CountDownLatch latch = new CountDownLatch(1); final ActionListener latchListener = new LatchedActionListener<>(listener, latch); listener = ActionListener.wrap(r -> { - ImmutableOpenMap> allMappings = r.mappings(); - MappingMetaData typeMapping = allMappings.get("twitter").get("_doc"); - Map mapping = typeMapping.sourceAsMap(); + Map allMappings = r.mappings(); + MappingMetaData indexMapping = allMappings.get("twitter"); + Map mapping = indexMapping.sourceAsMap(); Map type = new HashMap<>(); type.put("type", "text"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java index aa8ce3bb6c098..827b2f005aef8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetFieldMappingsResponseTests.java @@ -44,14 +44,14 @@ public void testFromXContent() throws IOException { .test(); } - Predicate getRandomFieldsExcludeFilter() { + private Predicate getRandomFieldsExcludeFilter() { // allow random fields at the level of `index` and `index.mappings.field` // otherwise random field could be evaluated as index name or type name return s -> false == (s.matches("(?[^.]+)") || s.matches("(?[^.]+)\\.mappings\\.(?[^.]+)")); } - static GetFieldMappingsResponse createTestInstance() { + private static GetFieldMappingsResponse createTestInstance() { Map> mappings = new HashMap<>(); // if mappings is empty, means that fields are not found if (randomBoolean()) { @@ -72,12 +72,11 @@ static GetFieldMappingsResponse createTestInstance() { } // As the client class GetFieldMappingsResponse doesn't 
have toXContent method, adding this method here only for the test - static void toXContent(GetFieldMappingsResponse response, XContentBuilder builder) throws IOException { + private static void toXContent(GetFieldMappingsResponse response, XContentBuilder builder) throws IOException { builder.startObject(); for (Map.Entry> indexEntry : response.mappings().entrySet()) { builder.startObject(indexEntry.getKey()); builder.startObject("mappings"); - Map mappings = null; for (Map.Entry fieldEntry : indexEntry.getValue().entrySet()) { builder.startObject(fieldEntry.getKey()); builder.field("full_name", fieldEntry.getValue().fullName()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetMappingsResponseTests.java new file mode 100644 index 0000000000000..0601609a8a766 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/GetMappingsResponseTests.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indices; + +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.Predicate; + +import static org.elasticsearch.client.indices.GetMappingsResponse.MAPPINGS; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class GetMappingsResponseTests extends ESTestCase { + + // Because the client-side class does not have a toXContent method, we test xContent serialization by creating + // a random client object, converting it to a server object then serializing it to xContent, and finally + // parsing it back as a client object. We check equality between the original client object, and the parsed one. 
+ public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + GetMappingsResponseTests::createTestInstance, + GetMappingsResponseTests::toXContent, + GetMappingsResponse::fromXContent) + .supportsUnknownFields(true) + .assertEqualsConsumer(GetMappingsResponseTests::assertEqualInstances) + .randomFieldsExcludeFilter(randomFieldsExcludeFilter()) + .test(); + } + + private static GetMappingsResponse createTestInstance() { + Map mappings = Collections.singletonMap( + "index-" + randomAlphaOfLength(5), randomMappingMetaData()); + return new GetMappingsResponse(mappings); + } + + private static void assertEqualInstances(GetMappingsResponse expected, GetMappingsResponse actual) { + assertEquals(expected.mappings(), actual.mappings()); + } + + private Predicate randomFieldsExcludeFilter() { + return field -> !field.equals(MAPPINGS.getPreferredName()); + } + + public static MappingMetaData randomMappingMetaData() { + Map mappings = new HashMap<>(); + + if (frequently()) { // rarely have no fields + mappings.put("field1", randomFieldMapping()); + if (randomBoolean()) { + mappings.put("field2", randomFieldMapping()); + } + } + + try { + return new MappingMetaData(MapperService.SINGLE_MAPPING_NAME, mappings); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static Map randomFieldMapping() { + Map mappings = new HashMap<>(); + if (randomBoolean()) { + mappings.put("type", randomFrom("text", "keyword")); + mappings.put("index", "analyzed"); + mappings.put("analyzer", "english"); + } else { + mappings.put("type", randomFrom("integer", "float", "long", "double")); + mappings.put("index", Objects.toString(randomBoolean())); + } + return mappings; + } + + private static void toXContent(GetMappingsResponse response, XContentBuilder builder) throws IOException { + Params params = new ToXContent.MapParams( + Collections.singletonMap(BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER, "false")); + ImmutableOpenMap.Builder> allMappings = ImmutableOpenMap.builder(); + + for (Map.Entry indexEntry : response.mappings().entrySet()) { + ImmutableOpenMap.Builder mappings = ImmutableOpenMap.builder(); + mappings.put(MapperService.SINGLE_MAPPING_NAME, indexEntry.getValue()); + allMappings.put(indexEntry.getKey(), mappings.build()); + } + + org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse serverResponse = + new org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse(allMappings.build()); + + builder.startObject(); + serverResponse.toXContent(builder, params); + builder.endObject(); + } +} diff --git a/docs/java-rest/high-level/indices/get_mappings.asciidoc b/docs/java-rest/high-level/indices/get_mappings.asciidoc index a42a8ac77b338..516e0633f83c8 100644 --- a/docs/java-rest/high-level/indices/get_mappings.asciidoc +++ b/docs/java-rest/high-level/indices/get_mappings.asciidoc @@ -10,13 +10,13 @@ [id="{upid}-{api}-request"] ==== Get Mappings Request -A +{request}+ can have an optional list of indices and optional list of types: +A +{request}+ can have an optional list of indices: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- -<1> An empty request that will return all indices and types +<1> An empty request that will return all indices <2> Setting the indices to fetch mapping for ==== Optional arguments @@ -27,7 +27,6 @@ The following arguments can also optionally be 
provided: include-tagged::{doc-tests-file}[{api}-request-masterTimeout] -------------------------------------------------- <1> Timeout to connect to the master node as a `TimeValue` -<2> Timeout to connect to the master node as a `String` ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- @@ -48,5 +47,5 @@ executed operation as follows: include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- <1> Returning all indices' mappings -<2> Retrieving the mappings for a particular index and type +<2> Retrieving the mappings for a particular index <3> Getting the mappings as a Java Map diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 50b7a36426802..7abbea9c1d941 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.mapping.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.ParseField; @@ -31,7 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import java.io.IOException; @@ -45,7 +43,7 @@ public class GetMappingsResponse extends ActionResponse implements ToXContentFra private ImmutableOpenMap> mappings = ImmutableOpenMap.of(); - GetMappingsResponse(ImmutableOpenMap> mappings) { + public GetMappingsResponse(ImmutableOpenMap> mappings) { this.mappings = mappings; } @@ -102,17 +100,20 @@ public static GetMappingsResponse fromXContent(XContentParser parser) throws IOE for (Map.Entry entry : parts.entrySet()) { final String indexName = entry.getKey(); assert entry.getValue() instanceof Map : "expected a map as type mapping, but got: " + entry.getValue().getClass(); + final Map mapping = (Map) ((Map) entry.getValue()).get(MAPPINGS.getPreferredName()); + ImmutableOpenMap.Builder typeBuilder = new ImmutableOpenMap.Builder<>(); - @SuppressWarnings("unchecked") - final Map fieldMappings = (Map) ((Map) entry.getValue()) - .get(MAPPINGS.getPreferredName()); - if (fieldMappings.isEmpty() == false) { - assert fieldMappings instanceof Map : "expected a map as inner type mapping, but got: " + fieldMappings.getClass(); - MappingMetaData mmd = new MappingMetaData(MapperService.SINGLE_MAPPING_NAME, fieldMappings); - typeBuilder.put(MapperService.SINGLE_MAPPING_NAME, mmd); + for (Map.Entry typeEntry : mapping.entrySet()) { + final String typeName = typeEntry.getKey(); + assert typeEntry.getValue() instanceof Map : "expected a map as inner type mapping, but got: " + + typeEntry.getValue().getClass(); + final Map fieldMappings = (Map) typeEntry.getValue(); + MappingMetaData mmd = new MappingMetaData(typeName, fieldMappings); + typeBuilder.put(typeName, mmd); } builder.put(indexName, typeBuilder.build()); } + return new GetMappingsResponse(builder.build()); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 8826932e252ba..74f451ab30cd2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -60,8 +60,8 @@ public class RestGetMappingAction extends BaseRestHandler { private static final Logger logger = LogManager.getLogger(RestGetMappingAction.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in get mapping requests is deprecated. " - + "The parameter will be removed in the next major version."; + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Using include_type_name in get" + + " mapping requests is deprecated. The parameter will be removed in the next major version."; public RestGetMappingAction(final Settings settings, final RestController controller) { super(settings); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java index 734de94b1c419..2b8db458eb82f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsResponseTests.java @@ -124,8 +124,8 @@ protected Predicate getRandomFieldsExcludeFilter() { } /** - * For now, we only unit test the legacy typed responses. This will soon no longer be the case, - * as we introduce support for typeless xContent parsing in {@link GetFieldMappingsResponse}. + * For xContent roundtrip testing we force the xContent output to still contain types because the parser + * still expects them. The new typeless parsing is implemented in the client side GetFieldMappingsResponse. 
*/ @Override protected ToXContent.Params getToXContentParams() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java index 7d1a19c65ed52..677a7b4b7eced 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponseTests.java @@ -20,13 +20,11 @@ package org.elasticsearch.action.admin.indices.mapping.get; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.test.AbstractStreamableXContentTestCase; @@ -41,8 +39,6 @@ import java.util.Map; import java.util.Objects; -import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; - public class GetMappingsResponseTests extends AbstractStreamableXContentTestCase { @Override @@ -117,15 +113,20 @@ public static ImmutableOpenMap createMappingsForIndex(i return typeBuilder.build(); } + /** + * For xContent roundtrip testing we force the xContent output to still contain types because the parser + * still expects them. The new typeless parsing is implemented in the client side GetMappingsResponse. + */ @Override - protected GetMappingsResponse createTestInstance() { - return createTestInstance(true); + protected Params getToXContentParams() { + return new ToXContent.MapParams(Collections.singletonMap(BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER, "true")); } - private GetMappingsResponse createTestInstance(boolean randomTypeNames) { + @Override + protected GetMappingsResponse createTestInstance() { ImmutableOpenMap.Builder> indexBuilder = ImmutableOpenMap.builder(); int typeCount = rarely() ? 
0 : 1; - indexBuilder.put("index-" + randomAlphaOfLength(5), createMappingsForIndex(typeCount, randomTypeNames)); + indexBuilder.put("index-" + randomAlphaOfLength(5), createMappingsForIndex(typeCount, randomBoolean())); GetMappingsResponse resp = new GetMappingsResponse(indexBuilder.build()); logger.debug("--> created: {}", resp); return resp; @@ -163,57 +164,4 @@ private static Map randomFieldMapping() { } return mappings; } - - @Override - protected GetMappingsResponse createXContextTestInstance(XContentType xContentType) { - // don't use random type names for XContent roundtrip tests because we cannot parse them back anymore - return createTestInstance(false); - } - - /** - * check that the "old" legacy response format with types works as expected - */ - public void testToXContentWithTypes() throws IOException { - Params params = new ToXContent.MapParams(Collections.singletonMap(BaseRestHandler.INCLUDE_TYPE_NAME_PARAMETER, "true")); - xContentTester(this::createParser, t -> createTestInstance(), params, this::fromXContentWithTypes) - .numberOfTestRuns(NUMBER_OF_TEST_RUNS) - .supportsUnknownFields(supportsUnknownFields()) - .shuffleFieldsExceptions(getShuffleFieldsExceptions()) - .randomFieldsExcludeFilter(getRandomFieldsExcludeFilter()) - .assertEqualsConsumer(this::assertEqualInstances) - .assertToXContentEquivalence(true) - .test(); - } - - /** - * including the pre-7.0 parsing code here to test that older HLRC clients using this can parse the responses that are - * returned when "include_type_name=true" - */ - private GetMappingsResponse fromXContentWithTypes(XContentParser parser) throws IOException { - if (parser.currentToken() == null) { - parser.nextToken(); - } - assert parser.currentToken() == XContentParser.Token.START_OBJECT; - Map parts = parser.map(); - - ImmutableOpenMap.Builder> builder = new ImmutableOpenMap.Builder<>(); - for (Map.Entry entry : parts.entrySet()) { - final String indexName = entry.getKey(); - assert entry.getValue() instanceof Map : "expected a map as type mapping, but got: " + entry.getValue().getClass(); - final Map mapping = (Map) ((Map) entry.getValue()).get("mappings"); - - ImmutableOpenMap.Builder typeBuilder = new ImmutableOpenMap.Builder<>(); - for (Map.Entry typeEntry : mapping.entrySet()) { - final String typeName = typeEntry.getKey(); - assert typeEntry.getValue() instanceof Map : "expected a map as inner type mapping, but got: " - + typeEntry.getValue().getClass(); - final Map fieldMappings = (Map) typeEntry.getValue(); - MappingMetaData mmd = new MappingMetaData(typeName, fieldMappings); - typeBuilder.put(typeName, mmd); - } - builder.put(indexName, typeBuilder.build()); - } - - return new GetMappingsResponse(builder.build()); - } } From 290c6637c2475320d6206cdafdec026f731777f4 Mon Sep 17 00:00:00 2001 From: Dimitrios Liappis Date: Mon, 28 Jan 2019 10:01:26 +0200 Subject: [PATCH 18/57] Refactor into appropriate uses of scheduleUnlessShuttingDown (#37709) Replace `threadPool().schedule()` / catch `EsRejectedExecutionException` pattern with direct calls to `ThreadPool#scheduleUnlessShuttingDown()`. 
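For illustration, here is a minimal sketch of the two patterns, assuming a `threadPool` field and arbitrary `delay` and `command` values (the identifiers are illustrative rather than tied to any one call site):

    // Old pattern at each call site: schedule, then swallow only shutdown-related rejections.
    try {
        threadPool.schedule(delay, ThreadPool.Names.GENERIC, command);
    } catch (EsRejectedExecutionException e) {
        if (e.isExecutorShutdown()) {
            logger.debug("couldn't schedule command, executor is shutting down", e);
        } else {
            throw e;
        }
    }

    // New pattern: the helper performs the same shutdown check internally.
    threadPool.scheduleUnlessShuttingDown(delay, ThreadPool.Names.GENERIC, command);

Rejections that are not caused by shutdown still propagate, so only the boilerplate moves, not the failure semantics.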
Closes #36318 --- .../cluster/InternalClusterInfoService.java | 16 ++++++---------- .../transport/TransportKeepAlive.java | 11 +---------- .../ccr/action/ShardFollowTasksExecutor.java | 14 ++------------ 3 files changed, 9 insertions(+), 32 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index e8261ca9f09cf..8d78f9c838e38 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -130,17 +130,17 @@ public void onMaster() { if (logger.isTraceEnabled()) { logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob"); } + + // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running + threadPool.scheduleUnlessShuttingDown(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob()); + try { - // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running - threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob()); if (clusterService.state().getNodes().getDataNodes().size() > 1) { // Submit an info update job to be run immediately threadPool.executor(executorName()).execute(() -> maybeRefresh()); } } catch (EsRejectedExecutionException ex) { - if (logger.isDebugEnabled()) { - logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex); - } + logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex); } } @@ -223,11 +223,7 @@ public void run() { if (logger.isTraceEnabled()) { logger.trace("Scheduling next run for updating cluster info in: {}", updateFrequency.toString()); } - try { - threadPool.schedule(updateFrequency, executorName(), this); - } catch (EsRejectedExecutionException ex) { - logger.debug("Reschedule cluster info service was rejected", ex); - } + threadPool.scheduleUnlessShuttingDown(updateFrequency, executorName(), this); } } }); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index b8d06e7e1174e..8f17377c2a29f 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractLifecycleRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; @@ -185,15 +184,7 @@ protected void doRunInLifecycle() { @Override protected void onAfterInLifecycle() { - try { - threadPool.schedule(pingInterval, ThreadPool.Names.GENERIC, this); - } catch (EsRejectedExecutionException ex) { - if (ex.isExecutorShutdown()) { - logger.debug("couldn't schedule new ping execution, executor is shutting down", ex); - } else { - throw ex; - } - } + threadPool.scheduleUnlessShuttingDown(pingInterval, ThreadPool.Names.GENERIC, this); } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 956171ba9b7c3..40aa90dcab5e0 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.engine.CommitStats; @@ -102,17 +101,8 @@ protected AllocatedPersistentTask createTask(long id, String type, String action Map<String, String> headers) { ShardFollowTask params = taskInProgress.getParams(); Client followerClient = wrapClient(client, params.getHeaders()); - BiConsumer<TimeValue, Runnable> scheduler = (delay, command) -> { - try { - threadPool.schedule(delay, Ccr.CCR_THREAD_POOL_NAME, command); - } catch (EsRejectedExecutionException e) { - if (e.isExecutorShutdown()) { - logger.debug("couldn't schedule command, executor is shutting down", e); - } else { - throw e; - } - } - }; + BiConsumer<TimeValue, Runnable> scheduler = (delay, command) -> + threadPool.scheduleUnlessShuttingDown(delay, Ccr.CCR_THREAD_POOL_NAME, command); final String recordedLeaderShardHistoryUUID = getLeaderShardHistoryUUID(params); return new ShardFollowNodeTask(id, type, action, getDescription(taskInProgress), parentTaskId, headers, params, From a056804831d8ea316213e8fb2ab723a532f4086c Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 28 Jan 2019 09:24:32 +0100 Subject: [PATCH 19/57] Track total hits in tests that index more than 10,000 docs This change sets track_total_hits to true on a test that needs to check the total hits of a query that can return more than 10,000 docs.
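Since search responses now track total hits accurately only up to 10,000 by default, a test that asserts on the exact total must opt in explicitly. A minimal sketch of the fix, assuming `numberOfDocs` documents were indexed:

    SearchResponse searchResponse = client().prepareSearch()
        .setTrackTotalHits(true) // without this the reported total is capped at 10,000
        .setQuery(matchAllQuery())
        .get();
    assertEquals(numberOfDocs, searchResponse.getHits().getTotalHits().value);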
Closes #37895 --- .../recovery/RecoveryWhileUnderLoadIT.java | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java index d23239509f3b4..9220748f38c25 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveryWhileUnderLoadIT.java @@ -115,7 +115,6 @@ public void testRecoverWhileUnderLoadAllocateReplicasTest() throws Exception { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37895") public void testRecoverWhileUnderLoadAllocateReplicasRelocatePrimariesTest() throws Exception { logger.info("--> creating test index ..."); int numberOfShards = numberOfShards(); @@ -300,8 +299,11 @@ private void iterateAssertCount(final int numberOfShards, final int iterations, SearchResponse[] iterationResults = new SearchResponse[iterations]; boolean error = false; for (int i = 0; i < iterations; i++) { - SearchResponse searchResponse = client().prepareSearch().setSize((int) numberOfDocs).setQuery(matchAllQuery()) - .addSort("id", SortOrder.ASC).get(); + SearchResponse searchResponse = client().prepareSearch() + .setSize((int) numberOfDocs) + .setQuery(matchAllQuery()) + .setTrackTotalHits(true) + .addSort("id", SortOrder.ASC).get(); logSearchResponse(numberOfShards, numberOfDocs, i, searchResponse); iterationResults[i] = searchResponse; if (searchResponse.getHits().getTotalHits().value != numberOfDocs) { @@ -340,7 +342,11 @@ private void iterateAssertCount(final int numberOfShards, final int iterations, assertTrue(awaitBusy(() -> { boolean errorOccurred = false; for (int i = 0; i < iterations; i++) { - SearchResponse searchResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(); + SearchResponse searchResponse = client().prepareSearch() + .setTrackTotalHits(true) + .setSize(0) + .setQuery(matchAllQuery()) + .get(); if (searchResponse.getHits().getTotalHits().value != numberOfDocs) { errorOccurred = true; } From 4e1a779773311338c9a7a1336b1485fb2bd575a2 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Jan 2019 09:30:04 +0100 Subject: [PATCH 20/57] Prepare ShardFollowNodeTask to bootstrap when it falls behind the leader shard (#37562) * Changed `LuceneChangesSnapshot` to throw a `MissingHistoryOperationsException` if the requested ops are missing. * Changed the shard changes API to handle the `MissingHistoryOperationsException` and wrap it in a `ResourceNotFoundException` that includes metadata to indicate the requested range can no longer be retrieved. * Changed `ShardFollowNodeTask` to handle this `ResourceNotFoundException` with the included metadata header.
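A minimal sketch of the metadata round trip, with illustrative local variable names (the key is the `Ccr.REQUESTED_OPS_MISSING_METADATA_KEY` constant added below):

    // Leader side: tag the wrapper exception so a follower can recognize this failure mode.
    ResourceNotFoundException wrapper = new ResourceNotFoundException(message, cause);
    wrapper.addMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY,
        Long.toString(fromSeqNo), Long.toString(toSeqNo));
    throw wrapper;

    // Follower side: unwrap, check for the marker, and divert to the fallen-behind handler.
    Throwable cause = ExceptionsHelper.unwrapCause(e);
    if (cause instanceof ResourceNotFoundException
            && ((ResourceNotFoundException) cause).getMetadataKeys().contains(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY)) {
        handleFallenBehindLeaderShard(e, from, maxOperationCount, maxRequiredSeqNo, retryCounter);
    }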
Relates to #35975 --- .../index/engine/LuceneChangesSnapshot.java | 4 +- .../MissingHistoryOperationsException.java | 31 ++++++++++++++ .../java/org/elasticsearch/xpack/ccr/Ccr.java | 2 + .../xpack/ccr/action/ShardChangesAction.java | 29 +++++-------- .../xpack/ccr/action/ShardFollowNodeTask.java | 22 ++++++++++ .../xpack/ccr/action/ShardChangesTests.java | 41 +++++++++++++++++-- 6 files changed, 105 insertions(+), 24 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/engine/MissingHistoryOperationsException.java diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java index bc20132f13d65..c9550a61f9e58 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java @@ -146,13 +146,13 @@ public Translog.Operation next() throws IOException { private void rangeCheck(Translog.Operation op) { if (op == null) { if (lastSeenSeqNo < toSeqNo) { - throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + throw new MissingHistoryOperationsException("Not all operations between from_seqno [" + fromSeqNo + "] " + "and to_seqno [" + toSeqNo + "] found; prematurely terminated last_seen_seqno [" + lastSeenSeqNo + "]"); } } else { final long expectedSeqNo = lastSeenSeqNo + 1; if (op.seqNo() != expectedSeqNo) { - throw new IllegalStateException("Not all operations between from_seqno [" + fromSeqNo + "] " + + throw new MissingHistoryOperationsException("Not all operations between from_seqno [" + fromSeqNo + "] " + "and to_seqno [" + toSeqNo + "] found; expected seqno [" + expectedSeqNo + "]; found [" + op + "]"); } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/MissingHistoryOperationsException.java b/server/src/main/java/org/elasticsearch/index/engine/MissingHistoryOperationsException.java new file mode 100644 index 0000000000000..8f2fa1e5b7375 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/MissingHistoryOperationsException.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +/** + * Exception indicating that not all requested operations from {@link LuceneChangesSnapshot} + * are available. 
+ */ +public final class MissingHistoryOperationsException extends IllegalStateException { + + MissingHistoryOperationsException(String message) { + super(message); + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index acda8d06dc550..1345faaa5bcb0 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -118,6 +118,8 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E public static final String CCR_CUSTOM_METADATA_LEADER_INDEX_NAME_KEY = "leader_index_name"; public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name"; + public static final String REQUESTED_OPS_MISSING_METADATA_KEY = "es.requested_operations_missing"; + private final boolean enabled; private final Settings settings; private final CcrLicenseChecker ccrLicenseChecker; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index b56aa148efc78..2a4fb7bb402ee 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -6,8 +6,7 @@ package org.elasticsearch.xpack.ccr.action; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; @@ -27,6 +26,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.MissingHistoryOperationsException; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardNotStartedException; @@ -34,9 +34,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.Ccr; import java.io.IOException; import java.util.ArrayList; @@ -392,21 +392,6 @@ protected void asyncShardOperation( } } - @Override - protected void doExecute(Task task, Request request, ActionListener listener) { - ActionListener wrappedListener = ActionListener.wrap(listener::onResponse, e -> { - Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof IllegalStateException && cause.getMessage().contains("Not all operations between from_seqno [")) { - String message = "Operations are no longer available for replicating. 
Maybe increase the retention setting [" + - IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey() + "]?"; - listener.onFailure(new ElasticsearchException(message, e)); - } else { - listener.onFailure(e); - } - }); - super.doExecute(task, request, wrappedListener); - } - private void globalCheckpointAdvanced( final ShardId shardId, final long globalCheckpoint, @@ -525,6 +510,14 @@ static Translog.Operation[] getOperations( break; } } + } catch (MissingHistoryOperationsException e) { + String message = "Operations are no longer available for replicating. Maybe increase the retention setting [" + + IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey() + "]?"; + // Make it easy to detect this error in ShardFollowNodeTask: + // (adding a metadata header instead of introducing a new exception that extends ElasticsearchException) + ResourceNotFoundException wrapper = new ResourceNotFoundException(message, e); + wrapper.addMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY, Long.toString(fromSeqNo), Long.toString(toSeqNo)); + throw wrapper; } return operations.toArray(EMPTY_OPERATIONS_ARRAY); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 7d8e1fa884757..233085c0a6857 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -30,6 +31,7 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.transport.NoSuchRemoteClusterException; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; @@ -275,6 +277,14 @@ private void sendShardChangesRequest(long from, int maxOperationCount, long maxR failedReadRequests++; fetchExceptions.put(from, Tuple.tuple(retryCounter, ExceptionsHelper.convertToElastic(e))); } + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof ResourceNotFoundException) { + ResourceNotFoundException resourceNotFoundException = (ResourceNotFoundException) cause; + if (resourceNotFoundException.getMetadataKeys().contains(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY)) { + handleFallenBehindLeaderShard(e, from, maxOperationCount, maxRequiredSeqNo, retryCounter); + return; + } + } handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); }); } @@ -291,6 +301,18 @@ void handleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Res maybeUpdateSettings(response.getSettingsVersion(), updateMappingsTask); } + void handleFallenBehindLeaderShard(Exception e, long from, int maxOperationCount, long maxRequiredSeqNo, AtomicInteger retryCounter) { + // Do restore from repository here and after that + // start() should be 
invoked and stats should be reset + + // For now handle like any other failure: + // need a more robust approach to avoid the scenario where an outstanding request + // can trigger another restore while the shard was restored already. + // https://github.com/elastic/elasticsearch/pull/37562#discussion_r250009367 + + handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); + } + /** Called when some operations are fetched from the leading */ protected void onOperationsFetched(Translog.Operation[] operations) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java index 6a2a4baab1738..f42a50b91ff02 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesTests.java @@ -6,6 +6,9 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -16,12 +19,17 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.LocalStateCcr; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; public class ShardChangesTests extends ESSingleNodeTestCase { @@ -88,7 +96,7 @@ public void testGetOperationsBasedOnGlobalSequenceId() throws Exception { assertThat(operation.id(), equalTo("5")); } - public void testMissingOperations() { + public void testMissingOperations() throws Exception { client().admin().indices().prepareCreate("index") .setSettings(Settings.builder() .put("index.soft_deletes.enabled", true) @@ -113,9 +121,34 @@ public void testMissingOperations() { request.setFromSeqNo(0L); request.setMaxOperationCount(1); - Exception e = expectThrows(ElasticsearchException.class, () -> client().execute(ShardChangesAction.INSTANCE, request).actionGet()); - assertThat(e.getMessage(), equalTo("Operations are no longer available for replicating. Maybe increase the retention setting " + - "[index.soft_deletes.retention.operations]?")); + { + ResourceNotFoundException e = + expectThrows(ResourceNotFoundException.class, () -> client().execute(ShardChangesAction.INSTANCE, request).actionGet()); + assertThat(e.getMessage(), equalTo("Operations are no longer available for replicating. 
Maybe increase the retention setting " + + "[index.soft_deletes.retention.operations]?")); + + assertThat(e.getMetadataKeys().size(), equalTo(1)); + assertThat(e.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), notNullValue()); + assertThat(e.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), contains("0", "0")); + } + { + AtomicReference<Exception> holder = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + client().execute(ShardChangesAction.INSTANCE, request, + new LatchedActionListener<>(ActionListener.wrap(r -> fail("expected an exception"), holder::set), latch)); + latch.await(); + + ElasticsearchException e = (ElasticsearchException) holder.get(); + assertThat(e, notNullValue()); + assertThat(e.getMetadataKeys().size(), equalTo(0)); + + ResourceNotFoundException cause = (ResourceNotFoundException) e.getCause(); + assertThat(cause.getMessage(), equalTo("Operations are no longer available for replicating. " + + "Maybe increase the retention setting [index.soft_deletes.retention.operations]?")); + assertThat(cause.getMetadataKeys().size(), equalTo(1)); + assertThat(cause.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), notNullValue()); + assertThat(cause.getMetadata(Ccr.REQUESTED_OPS_MISSING_METADATA_KEY), contains("0", "0")); + } } } From a5f578f7eab25f19ce351669fd80068a5ed5a85e Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Jan 2019 09:29:07 +0000 Subject: [PATCH 21/57] Fix duplicate note x-refs in mapping.asciidoc (#37904) The docs silently accept duplicate note markers (such as `<3>` here) but format them in an unexpected way. This change removes the duplication so that the rendered documentation looks as intended. --- docs/reference/mapping.asciidoc | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 5469a063a0303..b6a7052f69a0f 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -133,7 +133,7 @@ an <> field. [float] == Example mapping -A mapping could be specified when creating an index, as follows: +A mapping can be specified when creating an index, as follows: [source,js] --------------------------------------- @@ -142,10 +142,10 @@ PUT my_index <1> "mappings": { "properties": { <2> "title": { "type": "text" }, <3> - "name": { "type": "text" }, <3> - "age": { "type": "integer" }, <3> + "name": { "type": "text" }, <4> + "age": { "type": "integer" }, <5> "created": { - "type": "date", <3> + "type": "date", <6> "format": "strict_date_optional_time||epoch_millis" } } @@ -154,9 +154,11 @@ PUT my_index <1> --------------------------------------- // CONSOLE <1> Create an index called `my_index`. -<2> Specify fields or _properties_. -<3> Specify the data `type` and mapping for each field. - +<2> Specify the fields or _properties_ in the mapping. +<3> Specify that the `title` field contains `text` values. +<4> Specify that the `name` field contains `text` values. +<5> Specify that the `age` field contains `integer` values. +<6> Specify that the `created` field contains `date` values in two possible formats. -- From 57d321ed5f030cb6f17dba01b9d62e67133fc6d8 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 28 Jan 2019 09:38:46 +0000 Subject: [PATCH 22/57] [ML] Tighten up use of aliases rather than concrete indices (#37874) We have read and write aliases for the ML results indices. However, the job still had methods that purported to reliably return the name of the concrete results index being used by the job.
After reindexing prior to upgrade to 7.x this will be wrong, so the method has been renamed and the comments made more explicit to say the returned index name may not be the actual concrete index name for the lifetime of the job. Additionally, the selection of indices when deleting the job has been changed so that it works regardless of concrete index names. All these changes are nice-to-have for 6.7 and 7.0, but will become critical if we add rolling results indices in the 7.x release stream as 6.7 and 7.0 nodes may have to operate in a mixed version cluster that includes a version that can roll results indices. --- .../xpack/core/ml/job/config/Job.java | 20 ++++--- .../xpack/core/ml/job/config/JobTests.java | 4 +- .../xpack/ml/integration/MlJobIT.java | 29 +++++++++- .../ml/action/TransportDeleteJobAction.java | 58 +++++++++++-------- .../ml/action/TransportOpenJobAction.java | 9 +-- .../job/persistence/JobResultsProvider.java | 4 +- .../action/TransportOpenJobActionTests.java | 2 +- 7 files changed, 84 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java index a912b5d65f208..e6be2df0aed87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Job.java @@ -265,18 +265,24 @@ public List getGroups() { } /** - * The name of the index storing the job's results and state. - * This defaults to {@link #getId()} if a specific index name is not set. - * @return The job's index name + * A good starting name for the index storing the job's results. + * This defaults to the shared results index if a specific index name is not set. + * This method must only be used during initial job creation. + * After that the read/write aliases must always be used to access the job's + * results index, as the underlying index may roll or be reindexed. + * @return The job's initial results index name */ - public String getResultsIndexName() { + public String getInitialResultsIndexName() { return AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + resultsIndexName; } /** - * Private version of getResultsIndexName so that a job can be built from another - * job and pass index name validation - * @return The job's index name, minus prefix + * Get the unmodified results_index_name field from the job. + * This is provided to allow a job to be copied via the builder. + * After creation this does not necessarily reflect the actual concrete + * index used by the job. A job's results must always be read and written + * using the read and write aliases. 
+ * @return The job's configured "index name" */ private String getResultsIndexNameNoPrefix() { return resultsIndexName; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 1ba774e4231cd..fc93f5c02a920 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -416,14 +416,14 @@ public void testBuilder_setsDefaultIndexName() { Job.Builder builder = buildJobBuilder("foo"); Job job = builder.build(); assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT, - job.getResultsIndexName()); + job.getInitialResultsIndexName()); } public void testBuilder_setsIndexName() { Job.Builder builder = buildJobBuilder("foo"); builder.setResultsIndexName("carol"); Job job = builder.build(); - assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-carol", job.getResultsIndexName()); + assertEquals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + "custom-carol", job.getInitialResultsIndexName()); } public void testBuilder_withInvalidIndexNameThrows() { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java index 9f38791bb9f07..66bbe908fd004 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -17,6 +17,7 @@ import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; +import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.ml.MachineLearning; @@ -57,7 +58,7 @@ public void testPutJob_GivenFarequoteConfig() throws Exception { assertThat(responseAsString, containsString("\"job_id\":\"given-farequote-config-job\"")); } - public void testGetJob_GivenNoSuchJob() throws Exception { + public void testGetJob_GivenNoSuchJob() { ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/non-existing-job/_stats"))); @@ -519,8 +520,30 @@ public void testMultiIndexDelete() throws Exception { String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; createFarequoteJob(jobId); - client().performRequest(new Request("PUT", indexName + "-001")); - client().performRequest(new Request("PUT", indexName + "-002")); + // Make the job's results span an extra two indices, i.e. three in total. + // To do this the job's results alias needs to encompass all three indices. 
+ Request extraIndex1 = new Request("PUT", indexName + "-001"); + extraIndex1.setJsonEntity("{\n" + + " \"aliases\" : {\n" + + " \"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)+ "\" : {\n" + + " \"filter\" : {\n" + + " \"term\" : {\"" + Job.ID + "\" : \"" + jobId + "\" }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"); + client().performRequest(extraIndex1); + Request extraIndex2 = new Request("PUT", indexName + "-002"); + extraIndex2.setJsonEntity("{\n" + + " \"aliases\" : {\n" + + " \"" + AnomalyDetectorsIndex.jobResultsAliasedName(jobId)+ "\" : {\n" + + " \"filter\" : {\n" + + " \"term\" : {\"" + Job.ID + "\" : \"" + jobId + "\" }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"); + client().performRequest(extraIndex2); String indicesBeforeDelete = EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices")).getEntity()); assertThat(indicesBeforeDelete, containsString(indexName)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 876f2cd1aaccd..90d8c6e677a83 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -267,26 +267,25 @@ private void normalDeleteJob(ParentTaskAssigningClient parentTaskClient, DeleteJ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, String jobId, CheckedConsumer finishedHandler, Consumer failureHandler) { - AtomicReference indexName = new AtomicReference<>(); + AtomicReference indexNames = new AtomicReference<>(); final ActionListener completionHandler = ActionListener.wrap( response -> finishedHandler.accept(response.isAcknowledged()), failureHandler); - // Step 8. If we did not drop the index and after DBQ state done, we delete the aliases + // Step 8. If we did not drop the indices and after DBQ state done, we delete the aliases ActionListener dbqHandler = ActionListener.wrap( bulkByScrollResponse -> { - if (bulkByScrollResponse == null) { // no action was taken by DBQ, assume Index was deleted + if (bulkByScrollResponse == null) { // no action was taken by DBQ, assume indices were deleted completionHandler.onResponse(new AcknowledgedResponse(true)); } else { if (bulkByScrollResponse.isTimedOut()) { - logger.warn("[{}] DeleteByQuery for indices [{}, {}] timed out.", jobId, indexName.get(), - indexName.get() + "-*"); + logger.warn("[{}] DeleteByQuery for indices [{}] timed out.", jobId, String.join(", ", indexNames.get())); } if (!bulkByScrollResponse.getBulkFailures().isEmpty()) { - logger.warn("[{}] {} failures and {} conflicts encountered while running DeleteByQuery on indices [{}, {}].", + logger.warn("[{}] {} failures and {} conflicts encountered while running DeleteByQuery on indices [{}].", jobId, bulkByScrollResponse.getBulkFailures().size(), bulkByScrollResponse.getVersionConflicts(), - indexName.get(), indexName.get() + "-*"); + String.join(", ", indexNames.get())); for (BulkItemResponse.Failure failure : bulkByScrollResponse.getBulkFailures()) { logger.warn("DBQ failure: " + failure); } @@ -296,13 +295,12 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri }, failureHandler); - // Step 7. If we did not delete the index, we run a delete by query + // Step 7. 
If we did not delete the indices, we run a delete by query ActionListener deleteByQueryExecutor = ActionListener.wrap( response -> { - if (response) { - String indexPattern = indexName.get() + "-*"; - logger.info("Running DBQ on [" + indexName.get() + "," + indexPattern + "] for job [" + jobId + "]"); - DeleteByQueryRequest request = new DeleteByQueryRequest(indexName.get(), indexPattern); + if (response && indexNames.get().length > 0) { + logger.info("Running DBQ on [" + String.join(", ", indexNames.get()) + "] for job [" + jobId + "]"); + DeleteByQueryRequest request = new DeleteByQueryRequest(indexNames.get()); ConstantScoreQueryBuilder query = new ConstantScoreQueryBuilder(new TermQueryBuilder(Job.ID.getPreferredName(), jobId)); request.setQuery(query); @@ -318,15 +316,15 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri }, failureHandler); - // Step 6. If we have any hits, that means we are NOT the only job on this index, and should not delete it - // if we do not have any hits, we can drop the index and then skip the DBQ and alias deletion + // Step 6. If we have any hits, that means we are NOT the only job on these indices, and should not delete the indices. + // If we do not have any hits, we can drop the indices and then skip the DBQ and alias deletion. ActionListener customIndexSearchHandler = ActionListener.wrap( searchResponse -> { if (searchResponse == null || searchResponse.getHits().getTotalHits().value > 0) { deleteByQueryExecutor.onResponse(true); // We need to run DBQ and alias deletion } else { - logger.info("Running DELETE Index on [" + indexName.get() + "] for job [" + jobId + "]"); - DeleteIndexRequest request = new DeleteIndexRequest(indexName.get()); + logger.info("Running DELETE Index on [" + String.join(", ", indexNames.get()) + "] for job [" + jobId + "]"); + DeleteIndexRequest request = new DeleteIndexRequest(indexNames.get()); request.indicesOptions(IndicesOptions.lenientExpandOpen()); // If we have deleted the index, then we don't need to delete the aliases or run the DBQ executeAsyncWithOrigin( @@ -348,14 +346,28 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri } ); - // Step 5. Determine if we are on a shared index by looking at `.ml-anomalies-shared` or the custom index's aliases + // Step 5. Determine if we are on shared indices by looking at whether the initial index was ".ml-anomalies-shared" + // or whether the indices that the job's results alias points to contain any documents from other jobs. + // TODO: this check is currently assuming that a job's results indices are either ALL shared or ALL + // dedicated to the job. We have considered functionality like rolling jobs that generate large + // volumes of results from shared to dedicated indices. On deletion such a job would have a mix of + // shared indices requiring DBQ and dedicated indices that could be simply dropped. The current + // functionality would apply DBQ to all these indices, which is safe but suboptimal. So this functionality + // should be revisited when we add rolling results index functionality, especially if we add the ability + // to switch a job over to a dedicated index for future results. 
ActionListener getJobHandler = ActionListener.wrap( builder -> { Job job = builder.build(); - indexName.set(job.getResultsIndexName()); - if (indexName.get().equals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + - AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT)) { - //don't bother searching the index any further, we are on the default shared + indexNames.set(indexNameExpressionResolver.concreteIndexNames(clusterService.state(), + IndicesOptions.lenientExpandOpen(), AnomalyDetectorsIndex.jobResultsAliasedName(jobId))); + // The job may no longer be using the initial shared index, but if it started off on a + // shared index then it will still be on a shared index even if it's been reindexed + if (job.getInitialResultsIndexName() + .equals(AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT)) { + // don't bother searching the index any further, we are on the default shared + customIndexSearchHandler.onResponse(null); + } else if (indexNames.get().length == 0) { + // don't bother searching the index any further - it's already been closed or deleted customIndexSearchHandler.onResponse(null); } else { SearchSourceBuilder source = new SearchSourceBuilder() @@ -364,7 +376,7 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri .query(QueryBuilders.boolQuery().filter( QueryBuilders.boolQuery().mustNot(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobId)))); - SearchRequest searchRequest = new SearchRequest(indexName.get()); + SearchRequest searchRequest = new SearchRequest(indexNames.get()); searchRequest.source(source); executeAsyncWithOrigin(parentTaskClient, ML_ORIGIN, SearchAction.INSTANCE, searchRequest, customIndexSearchHandler); } @@ -372,7 +384,7 @@ private void deleteJobDocuments(ParentTaskAssigningClient parentTaskClient, Stri failureHandler ); - // Step 4. Get the job as the result index name is required + // Step 4. 
Get the job as the initial result index name is required ActionListener deleteCategorizerStateHandler = ActionListener.wrap( response -> { jobConfigProvider.getJob(jobId, getJobHandler); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 820da6a621356..36381a5837864 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -135,8 +135,8 @@ static PersistentTasksCustomMetaData.Assignment selectLeastLoadedMlNode(String j int maxMachineMemoryPercent, MlMemoryTracker memoryTracker, Logger logger) { - String resultsIndexName = job.getResultsIndexName(); - List unavailableIndices = verifyIndicesPrimaryShardsAreActive(resultsIndexName, clusterState); + String resultsWriteAlias = AnomalyDetectorsIndex.resultsWriteAlias(jobId); + List unavailableIndices = verifyIndicesPrimaryShardsAreActive(resultsWriteAlias, clusterState); if (unavailableIndices.size() != 0) { String reason = "Not opening job [" + jobId + "], because not all primary shards are active for the following indices [" + String.join(",", unavailableIndices) + "]"; @@ -359,9 +359,10 @@ static String[] indicesOfInterest(String resultsIndex) { return new String[]{AnomalyDetectorsIndex.jobStateIndexPattern(), resultsIndex, MlMetaIndex.INDEX_NAME}; } - static List verifyIndicesPrimaryShardsAreActive(String resultsIndex, ClusterState clusterState) { + static List verifyIndicesPrimaryShardsAreActive(String resultsWriteIndex, ClusterState clusterState) { IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); - String[] indices = resolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), indicesOfInterest(resultsIndex)); + String[] indices = resolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), + indicesOfInterest(resultsWriteIndex)); List unavailableIndices = new ArrayList<>(indices.length); for (String index : indices) { // Indices are created on demand from templates. 
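For readers following these changes, the alias-based index resolution used above can be summarised in a short sketch. The helper below is illustrative only (the method name and standalone framing are not part of this change); it uses the same calls that appear in TransportDeleteJobAction and TransportOpenJobAction:

    import org.elasticsearch.action.support.IndicesOptions;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
    import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;

    /**
     * Resolve the concrete indices currently behind a job's results read alias.
     * After reindexing or rolling, these may differ from the index name that was
     * recorded on the job at creation time, which is why the alias is authoritative.
     */
    static String[] resolveJobResultsIndices(ClusterState state, String jobId) {
        IndexNameExpressionResolver resolver = new IndexNameExpressionResolver();
        return resolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(),
                AnomalyDetectorsIndex.jobResultsAliasedName(jobId));
    }

TransportDeleteJobAction above stores exactly this result in its indexNames reference before deciding between a delete-by-query and a full index delete.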
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index b942c49c14e73..5ae1cafc9c461 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -166,7 +166,7 @@ public void checkForLeftOverDocuments(Job job, ActionListener listener) .setQuery(QueryBuilders.idsQuery().addIds(Quantiles.documentId(job.getId()), Quantiles.v54DocumentId(job.getId()))) .setIndicesOptions(IndicesOptions.strictExpand()); - String resultsIndexName = job.getResultsIndexName(); + String resultsIndexName = job.getInitialResultsIndexName(); SearchRequestBuilder resultDocSearch = client.prepareSearch(resultsIndexName) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) .setQuery(QueryBuilders.termQuery(Job.ID.getPreferredName(), job.getId())) @@ -252,7 +252,7 @@ public void createJobResultIndex(Job job, ClusterState state, final ActionListen String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(job.getId()); String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(job.getId()); - String indexName = job.getResultsIndexName(); + String indexName = job.getInitialResultsIndexName(); final ActionListener createAliasListener = ActionListener.wrap(success -> { final IndicesAliasesRequest request = client.admin().indices().prepareAliases() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index da54b33d27597..b23e042609030 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -347,7 +347,7 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { when(job.getId()).thenReturn("incompatible_type_job"); when(job.getJobVersion()).thenReturn(Version.CURRENT); when(job.getJobType()).thenReturn("incompatible_type"); - when(job.getResultsIndexName()).thenReturn("shared"); + when(job.getInitialResultsIndexName()).thenReturn("shared"); cs.nodes(nodes); metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks); From 2a610abef2b76314c750035a39c5accb984abb45 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 28 Jan 2019 09:45:55 +0000 Subject: [PATCH 23/57] Document that auto_create_index is dynamic (#37903) We changed the `action.auto_create_index` setting to be a dynamic cluster-level setting in #20274 but today the reference manual indicates that it is still a static node-level setting. This commit addresses this, and clarifies the semantics of patterns that may both permit and forbid the creation of certain indices. 
Relates #7513
---
 docs/reference/docs/index_.asciidoc | 70 ++++++++++++++++++++---------
 1 file changed, 48 insertions(+), 22 deletions(-)

diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc
index f706aaca3e034..c7ca42bfaf4c4 100644
--- a/docs/reference/docs/index_.asciidoc
+++ b/docs/reference/docs/index_.asciidoc
@@ -56,29 +56,55 @@ NOTE: Replica shards may not all be started when an indexing operation success
 [[index-creation]]
 === Automatic Index Creation

-The index operation automatically creates an index if it has not been
-created before (check out the
-<> for manually
-creating an index), and also automatically creates a
-dynamic mapping if one has not yet been
-created (check out the <>
-API for manually creating a mapping).
-
-The mapping itself is very flexible and is schema-free. New fields and
-objects will automatically be added to the mapping definition.
-Check out the <>
-section for more information on mapping definitions.
-
-Automatic index creation can be disabled by setting
-`action.auto_create_index` to `false` in the config file of all nodes,
-or via the cluster update settings API.
-Automatic mapping creation can be disabled by setting
-`index.mapper.dynamic` to `false` per-index as an index setting.
-
-Automatic index creation can include a pattern based white/black list,
-for example, set `action.auto_create_index` to `+aaa*,-bbb*,+ccc*,-*` (+
-meaning allowed, and - meaning disallowed).
+The index operation automatically creates an index if it does not already
+exist, and applies any <> that are
+configured. The index operation also creates a dynamic mapping if one does not
+already exist. By default, new fields and objects will automatically be added
+to the mapping definition if needed. Check out the <> section
+for more information on mapping definitions, and the
+<> API for information about updating mappings
+manually.
+
+Automatic index creation is controlled by the `action.auto_create_index`
+setting. This setting defaults to `true`, meaning that indices are always
+automatically created. Automatic index creation can be permitted only for
+indices matching certain patterns by changing the value of this setting to a
+comma-separated list of these patterns. It can also be explicitly permitted and
+forbidden by prefixing patterns in the list with a `+` or `-`. Finally it can
+be completely disabled by changing this setting to `false`.
+
+[source,js]
+--------------------------------------------------
+PUT _cluster/settings
+{
+    "persistent": {
+        "action.auto_create_index": "twitter,index10,-index1*,+ind*" <1>
+    }
+}
+
+PUT _cluster/settings
+{
+    "persistent": {
+        "action.auto_create_index": "false" <2>
+    }
+}
+
+PUT _cluster/settings
+{
+    "persistent": {
+        "action.auto_create_index": "true" <3>
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+<1> Permit only the auto-creation of indices called `twitter`, `index10`, no
+other index matching `index1*`, and any other index matching `ind*`. The
+patterns are matched in the order in which they are given.
+
+<2> Completely disable the auto-creation of indices.
+
+<3> Permit the auto-creation of indices with any name. This is the default.
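+
+For example, with `action.auto_create_index` set to `+aaa*,-bbb*,+ccc*,-*`,
+auto-creating an index called `aaa1` is permitted because it first matches
+`+aaa*`, creating `bbb1` is forbidden because it first matches `-bbb*`,
+creating `ccc1` is permitted, and any other name is forbidden by the
+trailing `-*`.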
[float] [[operation-type]] From c0409fb9f03f14e29160a2c57cbf203e168f51aa Mon Sep 17 00:00:00 2001 From: David Kyle Date: Mon, 28 Jan 2019 10:00:59 +0000 Subject: [PATCH 24/57] [ML] Marginal gains in slow multi node QA tests (#37825) Move 2 tests that are simple rest tests and out of the QA suite and cut the number of post data calls in ForecastIT --- .../xpack/ml/integration/CloseJobsIT.java | 57 ---------------- .../xpack/ml/integration/ForecastIT.java | 20 +++--- .../ReopenJobResetsFinishedTimeIT.java | 62 ------------------ .../rest-api-spec/api/ml.close_job.json | 4 ++ .../rest-api-spec/test/ml/jobs_crud.yml | 65 +++++++++++++++++-- 5 files changed, 75 insertions(+), 133 deletions(-) delete mode 100644 x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java delete mode 100644 x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java deleted file mode 100644 index 95ec9728842c6..0000000000000 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/CloseJobsIT.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.elasticsearch.client.Request; -import org.elasticsearch.client.RequestOptions; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.rest.ESRestTestCase; -import org.elasticsearch.xpack.ml.MachineLearning; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.nio.charset.StandardCharsets; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.hamcrest.Matchers.equalTo; - -public class CloseJobsIT extends ESRestTestCase { - - private static final String BASIC_AUTH_VALUE = basicAuthHeaderValue("x_pack_rest_user", - SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); - - @Override - protected Settings restClientSettings() { - return Settings.builder().put(super.restClientSettings()).put(ThreadContext.PREFIX + ".Authorization", BASIC_AUTH_VALUE).build(); - } - - public void testCloseJobsAcceptsOptionsFromPayload() throws Exception { - - Request request = new Request("post", MachineLearning.BASE_PATH + "anomaly_detectors/" + "job-that-doesnot-exist*" + "/_close"); - request.setJsonEntity("{\"allow_no_jobs\":false}"); - request.setOptions(RequestOptions.DEFAULT); - ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest(request)); - assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); - - request.setJsonEntity("{\"allow_no_jobs\":true}"); - Response response = client().performRequest(request); - 
assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - String responseAsString = responseEntityToString(response); - assertEquals(responseAsString, "{\"closed\":true}"); - } - - private static String responseEntityToString(Response response) throws IOException { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { - return reader.lines().collect(Collectors.joining("\n")); - } - } -} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index 2d8c6a4128b8d..50554fdd05af7 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -37,7 +37,7 @@ public class ForecastIT extends MlNativeAutodetectIntegTestCase { @After - public void tearDownData() throws Exception { + public void tearDownData() { cleanUp(); } @@ -142,7 +142,7 @@ public void testSingleSeries() throws Exception { } } - public void testDurationCannotBeLessThanBucketSpan() throws Exception { + public void testDurationCannotBeLessThanBucketSpan() { Detector.Builder detector = new Detector.Builder("mean", "value"); TimeValue bucketSpan = TimeValue.timeValueHours(1); @@ -163,7 +163,7 @@ public void testDurationCannotBeLessThanBucketSpan() throws Exception { equalTo("[duration] must be greater or equal to the bucket span: [10m/1h]")); } - public void testNoData() throws Exception { + public void testNoData() { Detector.Builder detector = new Detector.Builder("mean", "value"); TimeValue bucketSpan = TimeValue.timeValueMinutes(1); @@ -381,20 +381,20 @@ private void createDataWithLotsOfClientIps(TimeValue bucketSpan, Job.Builder job long now = Instant.now().getEpochSecond(); long timestamp = now - 15 * bucketSpan.seconds(); + List data = new ArrayList<>(); for (int h = 0; h < 15; h++) { + double value = 10.0 + h; for (int i = 1; i < 101; i++) { - List data = new ArrayList<>(); for (int j = 1; j < 81; j++) { - Map record = new HashMap<>(); - record.put("time", timestamp); - record.put("value", 10.0 + h); - record.put("clientIP", String.format(Locale.ROOT, "192.168.%d.%d", i, j)); - data.add(createJsonRecord(record)); + String json = String.format(Locale.ROOT, "{\"time\": %d, \"value\": %f, \"clientIP\": \"192.168.%d.%d\"}\n", + timestamp, value, i, j); + data.add(json); } - postData(job.getId(), data.stream().collect(Collectors.joining())); } timestamp += bucketSpan.seconds(); } + + postData(job.getId(), data.stream().collect(Collectors.joining())); flushJob(job.getId(), false); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java deleted file mode 100644 index 325b1370315ca..0000000000000 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ReopenJobResetsFinishedTimeIT.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml.integration; - -import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; -import org.elasticsearch.xpack.core.ml.job.config.DataDescription; -import org.elasticsearch.xpack.core.ml.job.config.Detector; -import org.elasticsearch.xpack.core.ml.job.config.Job; -import org.junit.After; - -import java.util.Collections; - -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.Matchers.is; - -public class ReopenJobResetsFinishedTimeIT extends MlNativeAutodetectIntegTestCase { - - @After - public void cleanUpTest() { - cleanUp(); - } - - public void test() { - final String jobId = "reset-finished-time-test"; - Job.Builder job = createJob(jobId); - - registerJob(job); - putJob(job); - openJob(job.getId()); - - assertThat(getSingleJob(jobId).getFinishedTime(), is(nullValue())); - - closeJob(jobId); - assertThat(getSingleJob(jobId).getFinishedTime(), is(notNullValue())); - - openJob(jobId); - assertThat(getSingleJob(jobId).getFinishedTime(), is(nullValue())); - } - - private Job getSingleJob(String jobId) { - return getJob(jobId).get(0); - } - - private Job.Builder createJob(String id) { - DataDescription.Builder dataDescription = new DataDescription.Builder(); - dataDescription.setFormat(DataDescription.DataFormat.XCONTENT); - dataDescription.setTimeFormat(DataDescription.EPOCH_MS); - - Detector.Builder d = new Detector.Builder("count", null); - AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); - - Job.Builder builder = new Job.Builder(); - builder.setId(id); - builder.setAnalysisConfig(analysisConfig); - builder.setDataDescription(dataDescription); - return builder; - } -} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.close_job.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.close_job.json index e0b0cd646d798..ab32b89f9c264 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.close_job.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.close_job.json @@ -28,6 +28,10 @@ "description": "Controls the time to wait until a job has closed. 
Default to 30 minutes" } } + }, + "body": { + "description" : "The URL params optionally sent in the body", + "required": false } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml index 1ab385aee9162..82831b882226a 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/jobs_crud.yml @@ -447,7 +447,6 @@ Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: jobs-crud-model-memory-limit-decrease_model_size_stats_1517443200000 body: job_id: jobs-crud-model-memory-limit-decrease @@ -908,7 +907,6 @@ - do: index: index: .ml-state - type: doc id: jobs-crud-existing-docs_categorizer_state#1 body: key: value @@ -936,7 +934,6 @@ - do: index: index: .ml-state - type: doc id: jobs-crud-existing-docs_quantiles body: key: value @@ -968,7 +965,6 @@ Content-Type: application/json index: index: .ml-anomalies-shared - type: doc id: "jobs-crud-existing-result-docs_1464739200000_1" body: { @@ -1393,3 +1389,64 @@ ml.open_job: job_id: persistent-task-allocation-allowed-test - match: { opened: true } + +--- +"Test reopen job resets the finished time": + - do: + ml.put_job: + job_id: jobs-crud-reset-finished-time + body: > + { + "analysis_config" : { + "detectors" :[{"function":"count"}] + }, + "data_description" : { + } + } + - match: { job_id: jobs-crud-reset-finished-time } + + - do: + ml.open_job: + job_id: jobs-crud-reset-finished-time + + - do: + ml.get_jobs: + job_id: jobs-crud-reset-finished-time + - is_false: jobs.0.finished_time + + - do: + ml.close_job: + job_id: jobs-crud-reset-finished-time + + - do: + ml.get_jobs: + job_id: jobs-crud-reset-finished-time + - is_true: jobs.0.finished_time + + - do: + ml.open_job: + job_id: jobs-crud-reset-finished-time + + - do: + ml.get_jobs: + job_id: jobs-crud-reset-finished-time + - is_false: jobs.0.finished_time + +--- +"Test close job with body params": + - do: + catch: missing + ml.close_job: + job_id: job-that-doesnot-exist* + body: > + { + "allow_no_jobs" : false + } + + - do: + ml.close_job: + job_id: job-that-doesnot-exist* + body: > + { + "allow_no_jobs" : true + } From 758eb9d451eebd90d787c1e6ec70a49b4dd20c73 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 28 Jan 2019 11:30:20 +0100 Subject: [PATCH 25/57] Track accurate total hits in CloseIndexIT The test was not using the TRACK_TOTAL_HITS_ACCURATE and thus encountered a different issue tracked in #37907. In the meanwhile we can adapt the test to not fail anymore. 
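For reference, the corrected assertion pattern is sketched below (the index name and expected count are placeholders; the calls mirror the diff that follows):

    import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE;
    import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

    // Track total hits accurately instead of stopping at the default threshold,
    // so the hit count assertion is exact however many documents were indexed.
    assertHitCount(
        client().prepareSearch("index").setSize(0)
            .setTrackTotalHitsUpTo(TRACK_TOTAL_HITS_ACCURATE).get(),
        expectedDocCount);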
Closes #37897 --- .../java/org/elasticsearch/indices/state/CloseIndexIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index a56a65f6e4deb..1d32283c6cb94 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -41,6 +41,7 @@ import static java.util.Collections.emptySet; import static java.util.stream.Collectors.toList; import static org.elasticsearch.action.support.IndicesOptions.lenientExpandOpen; +import static org.elasticsearch.search.internal.SearchContext.TRACK_TOTAL_HITS_ACCURATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; @@ -188,7 +189,7 @@ public void testCloseWhileIndexingDocuments() throws Exception { assertIndexIsClosed(indexName); assertAcked(client().admin().indices().prepareOpen(indexName)); - assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(MAX_DOCS).get(), nbDocs); + assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(TRACK_TOTAL_HITS_ACCURATE).get(), nbDocs); } public void testCloseWhileDeletingIndices() throws Exception { @@ -301,7 +302,7 @@ public void testConcurrentClosesAndOpens() throws Exception { } refresh(indexName); assertIndexIsOpened(indexName); - assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(MAX_DOCS).get(), + assertHitCount(client().prepareSearch(indexName).setSize(0).setTrackTotalHitsUpTo(TRACK_TOTAL_HITS_ACCURATE).get(), indexer.totalIndexedDocs()); } From 194cdfe20815bdd99c45cf06882445f3333e2df9 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 28 Jan 2019 07:11:51 -0500 Subject: [PATCH 26/57] Sync retention leases on expiration (#37902) This commit introduces a sync of retention leases when a retention lease expires. As expiration of retention leases is lazy, their expiration is managed only when getting the current retention leases from the replication tracker. At this point, we callback to our full retention lease sync to sync and flush these on all shard copies. With this change, replicas do not locally manage expiration of retention leases; instead, that is done only on the primary. --- .../index/seqno/ReplicationTracker.java | 70 ++++++--- ...ReplicationTrackerRetentionLeaseTests.java | 136 ++++++++++++++++-- .../index/seqno/RetentionLeaseSyncIT.java | 75 ++++++++++ .../shard/IndexShardRetentionLeaseTests.java | 53 +++++-- 4 files changed, 284 insertions(+), 50 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 7e85602289205..4a614d8874aff 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -147,10 +147,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L private final LongSupplier currentTimeMillisSupplier; /** - * A callback when a new retention lease is created. In practice, this callback invokes the retention lease sync action, to sync - * retention leases to replicas. 
+ * A callback when a new retention lease is created or an existing retention lease expires. In practice, this callback invokes the + * retention lease sync action, to sync retention leases to replicas. */ - private final BiConsumer, ActionListener> onNewRetentionLease; + private final BiConsumer, ActionListener> onSyncRetentionLeases; /** * This set contains allocation IDs for which there is a thread actively waiting for the local checkpoint to advance to at least the @@ -171,21 +171,45 @@ private Collection copyRetentionLeases() { } /** - * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. + * Get all non-expired retention leases tracked on this shard. An unmodifiable copy of the retention leases is returned. Note that only + * the primary shard calculates which leases are expired, and if any have expired, syncs the retention leases to any replicas. * * @return the retention leases */ - public synchronized Collection getRetentionLeases() { - final long currentTimeMillis = currentTimeMillisSupplier.getAsLong(); - final long retentionLeaseMillis = indexSettings.getRetentionLeaseMillis(); - final Collection nonExpiredRetentionLeases = retentionLeases - .values() - .stream() - .filter(retentionLease -> currentTimeMillis - retentionLease.timestamp() <= retentionLeaseMillis) - .collect(Collectors.toList()); - retentionLeases.clear(); - retentionLeases.putAll(nonExpiredRetentionLeases.stream().collect(Collectors.toMap(RetentionLease::id, lease -> lease))); - return Collections.unmodifiableCollection(nonExpiredRetentionLeases); + public Collection getRetentionLeases() { + final boolean wasPrimaryMode; + final Collection nonExpiredRetentionLeases; + synchronized (this) { + if (primaryMode) { + // the primary calculates the non-expired retention leases and syncs them to replicas + final long currentTimeMillis = currentTimeMillisSupplier.getAsLong(); + final long retentionLeaseMillis = indexSettings.getRetentionLeaseMillis(); + final Collection expiredRetentionLeases = retentionLeases + .values() + .stream() + .filter(retentionLease -> currentTimeMillis - retentionLease.timestamp() > retentionLeaseMillis) + .collect(Collectors.toList()); + if (expiredRetentionLeases.isEmpty()) { + // early out as no retention leases have expired + return copyRetentionLeases(); + } + // clean up the expired retention leases + for (final RetentionLease expiredRetentionLease : expiredRetentionLeases) { + retentionLeases.remove(expiredRetentionLease.id()); + } + } + /* + * At this point, we were either in primary mode and have updated the non-expired retention leases into the tracking map, or + * we were in replica mode and merely need to copy the existing retention leases since a replica does not calculate the + * non-expired retention leases, instead receiving them on syncs from the primary. + */ + wasPrimaryMode = primaryMode; + nonExpiredRetentionLeases = copyRetentionLeases(); + } + if (wasPrimaryMode) { + onSyncRetentionLeases.accept(nonExpiredRetentionLeases, ActionListener.wrap(() -> {})); + } + return nonExpiredRetentionLeases; } /** @@ -215,7 +239,7 @@ public RetentionLease addRetentionLease( retentionLeases.put(id, retentionLease); currentRetentionLeases = copyRetentionLeases(); } - onNewRetentionLease.accept(currentRetentionLeases, listener); + onSyncRetentionLeases.accept(currentRetentionLeases, listener); return retentionLease; } @@ -500,11 +524,11 @@ private static long inSyncCheckpointStates( * Initialize the global checkpoint service. 
The specified global checkpoint should be set to the last known global checkpoint, or * {@link SequenceNumbers#UNASSIGNED_SEQ_NO}. * - * @param shardId the shard ID - * @param allocationId the allocation ID - * @param indexSettings the index settings - * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} - * @param onNewRetentionLease a callback when a new retention lease is created + * @param shardId the shard ID + * @param allocationId the allocation ID + * @param indexSettings the index settings + * @param globalCheckpoint the last known global checkpoint for this shard, or {@link SequenceNumbers#UNASSIGNED_SEQ_NO} + * @param onSyncRetentionLeases a callback when a new retention lease is created or an existing retention lease expires */ public ReplicationTracker( final ShardId shardId, @@ -513,7 +537,7 @@ public ReplicationTracker( final long globalCheckpoint, final LongConsumer onGlobalCheckpointUpdated, final LongSupplier currentTimeMillisSupplier, - final BiConsumer, ActionListener> onNewRetentionLease) { + final BiConsumer, ActionListener> onSyncRetentionLeases) { super(shardId, indexSettings); assert globalCheckpoint >= SequenceNumbers.UNASSIGNED_SEQ_NO : "illegal initial global checkpoint: " + globalCheckpoint; this.shardAllocationId = allocationId; @@ -524,7 +548,7 @@ public ReplicationTracker( checkpoints.put(allocationId, new CheckpointState(SequenceNumbers.UNASSIGNED_SEQ_NO, globalCheckpoint, false, false)); this.onGlobalCheckpointUpdated = Objects.requireNonNull(onGlobalCheckpointUpdated); this.currentTimeMillisSupplier = Objects.requireNonNull(currentTimeMillisSupplier); - this.onNewRetentionLease = Objects.requireNonNull(onNewRetentionLease); + this.onSyncRetentionLeases = Objects.requireNonNull(onSyncRetentionLeases); this.pendingInSync = new HashSet<>(); this.routingTable = null; this.replicationGroup = null; diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 3dafb93d65400..7a867027412e1 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -30,6 +31,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; @@ -67,17 +69,17 @@ public void testAddOrRenewRetentionLease() { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); replicationTracker.addRetentionLease( Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() -> {})); - assertRetentionLeases(replicationTracker, i + 1, minimumRetainingSequenceNumbers, () -> 0L); + assertRetentionLeases(replicationTracker, i + 1, minimumRetainingSequenceNumbers, () -> 0L, true); } for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = 
randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE); replicationTracker.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); - assertRetentionLeases(replicationTracker, length, minimumRetainingSequenceNumbers, () -> 0L); + assertRetentionLeases(replicationTracker, length, minimumRetainingSequenceNumbers, () -> 0L, true); } } - public void testOnNewRetentionLease() { + public void testAddRetentionLeaseCausesRetentionLeaseSync() { final AllocationId allocationId = AllocationId.newInitializing(); final Map retentionLeases = new HashMap<>(); final AtomicBoolean invoked = new AtomicBoolean(); @@ -113,6 +115,7 @@ public void testOnNewRetentionLease() { replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); // assert that the new retention lease callback was invoked assertTrue(invoked.get()); + // reset the invocation marker so that we can assert the callback was not invoked when renewing the lease invoked.set(false); replicationTracker.renewRetentionLease(id, retainingSequenceNumber, "test"); @@ -120,7 +123,15 @@ public void testOnNewRetentionLease() { } } - public void testExpiration() { + public void testExpirationOnPrimary() { + runExpirationTest(true); + } + + public void testExpirationOnReplica() { + runExpirationTest(false); + } + + private void runExpirationTest(final boolean primaryMode) { final AllocationId allocationId = AllocationId.newInitializing(); final AtomicLong currentTimeMillis = new AtomicLong(randomLongBetween(0, 1024)); final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis()); @@ -141,42 +152,136 @@ public void testExpiration() { Collections.singleton(allocationId.getId()), routingTable(Collections.emptySet(), allocationId), Collections.emptySet()); - replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + if (primaryMode) { + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + } final long[] retainingSequenceNumbers = new long[1]; retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - replicationTracker.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); + if (primaryMode) { + replicationTracker.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); + } else { + replicationTracker.updateRetentionLeasesOnReplica( + Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + } { final Collection retentionLeases = replicationTracker.getRetentionLeases(); assertThat(retentionLeases, hasSize(1)); final RetentionLease retentionLease = retentionLeases.iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get); + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryMode); } // renew the lease currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(0, 1024)); retainingSequenceNumbers[0] = randomLongBetween(retainingSequenceNumbers[0], Long.MAX_VALUE); - replicationTracker.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + if (primaryMode) { + replicationTracker.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + } else { + replicationTracker.updateRetentionLeasesOnReplica( + Collections.singleton(new 
RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + } { final Collection retentionLeases = replicationTracker.getRetentionLeases(); assertThat(retentionLeases, hasSize(1)); final RetentionLease retentionLease = retentionLeases.iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get); + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, primaryMode); } // now force the lease to expire currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(retentionLeaseMillis, Long.MAX_VALUE - currentTimeMillis.get())); - assertRetentionLeases(replicationTracker, 0, retainingSequenceNumbers, currentTimeMillis::get); + if (primaryMode) { + assertRetentionLeases(replicationTracker, 0, retainingSequenceNumbers, currentTimeMillis::get, true); + } else { + // leases do not expire on replicas until synced from the primary + assertRetentionLeases(replicationTracker, 1, retainingSequenceNumbers, currentTimeMillis::get, false); + } + } + + public void testRetentionLeaseExpirationCausesRetentionLeaseSync() { + final AllocationId allocationId = AllocationId.newInitializing(); + final AtomicLong currentTimeMillis = new AtomicLong(randomLongBetween(0, 1024)); + final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis()); + final Settings settings = Settings + .builder() + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) + .build(); + final Map> retentionLeases = new HashMap<>(); + final AtomicBoolean invoked = new AtomicBoolean(); + final AtomicReference reference = new AtomicReference<>(); + final ReplicationTracker replicationTracker = new ReplicationTracker( + new ShardId("test", "_na", 0), + allocationId.getId(), + IndexSettingsModule.newIndexSettings("test", settings), + UNASSIGNED_SEQ_NO, + value -> {}, + currentTimeMillis::get, + (leases, listener) -> { + // we do not want to hold a lock on the replication tracker in the callback! 
+ assertFalse(Thread.holdsLock(reference.get())); + invoked.set(true); + assertThat( + leases.stream().collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), + equalTo(retentionLeases)); + }); + reference.set(replicationTracker); + replicationTracker.updateFromMaster( + randomNonNegativeLong(), + Collections.singleton(allocationId.getId()), + routingTable(Collections.emptySet(), allocationId), + Collections.emptySet()); + replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + + final int length = randomIntBetween(0, 8); + for (int i = 0; i < length; i++) { + final String id = randomAlphaOfLength(8); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + retentionLeases.put(id, Tuple.tuple(retainingSequenceNumber, currentTimeMillis.get())); + replicationTracker.addRetentionLease(id, retainingSequenceNumber, "test", ActionListener.wrap(() -> {})); + // assert that the new retention lease callback was invoked + assertTrue(invoked.get()); + + // reset the invocation marker so that we can assert the callback was not invoked when renewing the lease + invoked.set(false); + currentTimeMillis.set(1 + currentTimeMillis.get()); + retentionLeases.put(id, Tuple.tuple(retainingSequenceNumber, currentTimeMillis.get())); + replicationTracker.renewRetentionLease(id, retainingSequenceNumber, "test"); + + // reset the invocation marker so that we can assert the callback was invoked if any leases are expired + assertFalse(invoked.get()); + // randomly expire some leases + final long currentTimeMillisIncrement = randomLongBetween(0, Long.MAX_VALUE - currentTimeMillis.get()); + // calculate the expired leases and update our tracking map + final List expiredIds = retentionLeases.entrySet() + .stream() + .filter(r -> currentTimeMillis.get() + currentTimeMillisIncrement > r.getValue().v2() + retentionLeaseMillis) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + expiredIds.forEach(retentionLeases::remove); + currentTimeMillis.set(currentTimeMillis.get() + currentTimeMillisIncrement); + // getting the leases has the side effect of calculating which leases are expired and invoking the sync callback + final Collection current = replicationTracker.getRetentionLeases(); + // the current leases should equal our tracking map + assertThat( + current.stream().collect(Collectors.toMap(RetentionLease::id, ReplicationTrackerRetentionLeaseTests::toTuple)), + equalTo(retentionLeases)); + // the callback should only be invoked if there were expired leases + assertThat(invoked.get(), equalTo(expiredIds.isEmpty() == false)); + } + } + + private static Tuple toTuple(final RetentionLease retentionLease) { + return Tuple.tuple(retentionLease.retainingSequenceNumber(), retentionLease.timestamp()); } private void assertRetentionLeases( final ReplicationTracker replicationTracker, final int size, final long[] minimumRetainingSequenceNumbers, - final LongSupplier currentTimeMillisSupplier) { + final LongSupplier currentTimeMillisSupplier, + final boolean primaryMode) { final Collection retentionLeases = replicationTracker.getRetentionLeases(); final Map idToRetentionLease = new HashMap<>(); for (final RetentionLease retentionLease : retentionLeases) { @@ -188,9 +293,12 @@ private void assertRetentionLeases( assertThat(idToRetentionLease.keySet(), hasItem(Integer.toString(i))); final RetentionLease retentionLease = idToRetentionLease.get(Integer.toString(i)); assertThat(retentionLease.retainingSequenceNumber(), 
equalTo(minimumRetainingSequenceNumbers[i])); - assertThat( - currentTimeMillisSupplier.getAsLong() - retentionLease.timestamp(), - lessThanOrEqualTo(replicationTracker.indexSettings().getRetentionLeaseMillis())); + if (primaryMode) { + // retention leases can be expired on replicas, so we can only assert on primaries here + assertThat( + currentTimeMillisSupplier.getAsLong() - retentionLease.timestamp(), + lessThanOrEqualTo(replicationTracker.indexSettings().getRetentionLeaseMillis())); + } assertThat(retentionLease.source(), equalTo("test-" + i)); } } diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java index fad9e25db12d6..7d6e5fa2dc5a6 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseSyncIT.java @@ -23,20 +23,28 @@ import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.Collection; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; public class RetentionLeaseSyncIT extends ESIntegTestCase { @@ -89,6 +97,73 @@ public void testRetentionLeasesSyncedOnAdd() throws Exception { } } + public void testRetentionLeasesSyncOnExpiration() throws Exception { + final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); + internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); + final long estimatedTimeIntervalMillis = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(Settings.EMPTY).millis(); + final TimeValue retentionLeaseTimeToLive = + TimeValue.timeValueMillis(randomLongBetween(estimatedTimeIntervalMillis, 2 * estimatedTimeIntervalMillis)); + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", numberOfReplicas) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), retentionLeaseTimeToLive) + .build(); + createIndex("index", settings); + ensureGreen("index"); + final String primaryShardNodeId = clusterService().state().routingTable().index("index").shard(0).primaryShard().currentNodeId(); + final String primaryShardNodeName = clusterService().state().nodes().get(primaryShardNodeId).getName(); + final IndexShard primary = internalCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + // we will add multiple retention leases, wait for some to expire, and assert a consistent view between the primary and the replicas + final int length = randomIntBetween(1, 8); + for (int i = 0; i < length; i++) { + final String id = 
randomAlphaOfLength(8); + final long retainingSequenceNumber = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); + final String source = randomAlphaOfLength(8); + final CountDownLatch latch = new CountDownLatch(1); + final ActionListener listener = ActionListener.wrap(r -> latch.countDown(), e -> fail(e.toString())); + final RetentionLease currentRetentionLease = primary.addRetentionLease(id, retainingSequenceNumber, source, listener); + final long now = System.nanoTime(); + latch.await(); + + // check current retention leases have been synced to all replicas + for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { + final String replicaShardNodeId = replicaShard.currentNodeId(); + final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName(); + final IndexShard replica = internalCluster() + .getInstance(IndicesService.class, replicaShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + assertThat(replica.getRetentionLeases(), hasItem(currentRetentionLease)); + } + + // sleep long enough that *possibly* the current retention lease has expired, and certainly that any previous have + final long later = System.nanoTime(); + Thread.sleep(Math.max(0, retentionLeaseTimeToLive.millis() - TimeUnit.NANOSECONDS.toMillis(later - now))); + final Collection currentRetentionLeases = primary.getRetentionLeases(); + assertThat(currentRetentionLeases, anyOf(empty(), contains(currentRetentionLease))); + + /* + * Check that expiration of retention leases has been synced to all replicas. We have to assert busy since syncing happens in + * the background. + */ + assertBusy(() -> { + for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) { + final String replicaShardNodeId = replicaShard.currentNodeId(); + final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName(); + final IndexShard replica = internalCluster() + .getInstance(IndicesService.class, replicaShardNodeName) + .getShardOrNull(new ShardId(resolveIndex("index"), 0)); + if (currentRetentionLeases.isEmpty()) { + assertThat(replica.getRetentionLeases(), empty()); + } else { + assertThat(replica.getRetentionLeases(), contains(currentRetentionLeases.toArray(new RetentionLease[0]))); + } + } + }); + } + } + private static Map toMap(final Collection replicaCommittedRetentionLeases) { return replicaCommittedRetentionLeases.stream().collect(Collectors.toMap(RetentionLease::id, Function.identity())); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java index eff1edfed52ba..cd7d2a2c12cb8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardRetentionLeaseTests.java @@ -35,6 +35,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.ExecutorService; @@ -81,57 +82,79 @@ public void testAddOrRenewRetentionLease() throws IOException { minimumRetainingSequenceNumbers[i] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); indexShard.addRetentionLease( Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i, ActionListener.wrap(() 
-> {})); - assertRetentionLeases(indexShard, i + 1, minimumRetainingSequenceNumbers, () -> 0L); + assertRetentionLeases(indexShard, i + 1, minimumRetainingSequenceNumbers, () -> 0L, true); } for (int i = 0; i < length; i++) { minimumRetainingSequenceNumbers[i] = randomLongBetween(minimumRetainingSequenceNumbers[i], Long.MAX_VALUE); indexShard.renewRetentionLease(Integer.toString(i), minimumRetainingSequenceNumbers[i], "test-" + i); - assertRetentionLeases(indexShard, length, minimumRetainingSequenceNumbers, () -> 0L); + assertRetentionLeases(indexShard, length, minimumRetainingSequenceNumbers, () -> 0L, true); } } finally { closeShards(indexShard); } } - public void testExpiration() throws IOException { + public void testExpirationOnPrimary() throws IOException { + runExpirationTest(true); + } + + public void testExpirationOnReplica() throws IOException { + runExpirationTest(false); + } + + private void runExpirationTest(final boolean primary) throws IOException { final long retentionLeaseMillis = randomLongBetween(1, TimeValue.timeValueHours(12).millis()); final Settings settings = Settings .builder() .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_SETTING.getKey(), TimeValue.timeValueMillis(retentionLeaseMillis)) .build(); // current time is mocked through the thread pool - final IndexShard indexShard = newStartedShard(true, settings, new InternalEngineFactory()); + final IndexShard indexShard = newStartedShard(primary, settings, new InternalEngineFactory()); try { final long[] retainingSequenceNumbers = new long[1]; retainingSequenceNumbers[0] = randomLongBetween(SequenceNumbers.NO_OPS_PERFORMED, Long.MAX_VALUE); - indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); + if (primary) { + indexShard.addRetentionLease("0", retainingSequenceNumbers[0], "test-0", ActionListener.wrap(() -> {})); + } else { + indexShard.updateRetentionLeasesOnReplica( + Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + } { final Collection retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); assertThat(retentionLeases, hasSize(1)); final RetentionLease retentionLease = retentionLeases.iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get); + assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primary); } // renew the lease currentTimeMillis.set(currentTimeMillis.get() + randomLongBetween(0, 1024)); retainingSequenceNumbers[0] = randomLongBetween(retainingSequenceNumbers[0], Long.MAX_VALUE); - indexShard.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + if (primary) { + indexShard.renewRetentionLease("0", retainingSequenceNumbers[0], "test-0"); + } else { + indexShard.updateRetentionLeasesOnReplica( + Collections.singleton(new RetentionLease("0", retainingSequenceNumbers[0], currentTimeMillis.get(), "test-0"))); + } { final Collection retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); assertThat(retentionLeases, hasSize(1)); final RetentionLease retentionLease = retentionLeases.iterator().next(); assertThat(retentionLease.timestamp(), equalTo(currentTimeMillis.get())); - assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get); + assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, primary); 
} // now force the lease to expire currentTimeMillis.set( currentTimeMillis.get() + randomLongBetween(retentionLeaseMillis, Long.MAX_VALUE - currentTimeMillis.get())); - assertRetentionLeases(indexShard, 0, retainingSequenceNumbers, currentTimeMillis::get); + if (primary) { + assertRetentionLeases(indexShard, 0, retainingSequenceNumbers, currentTimeMillis::get, true); + } else { + assertRetentionLeases(indexShard, 1, retainingSequenceNumbers, currentTimeMillis::get, false); + } } finally { closeShards(indexShard); } @@ -196,7 +219,8 @@ private void assertRetentionLeases( final IndexShard indexShard, final int size, final long[] minimumRetainingSequenceNumbers, - final LongSupplier currentTimeMillisSupplier) { + final LongSupplier currentTimeMillisSupplier, + final boolean primary) { final Collection retentionLeases = indexShard.getEngine().config().retentionLeasesSupplier().get(); final Map idToRetentionLease = new HashMap<>(); for (final RetentionLease retentionLease : retentionLeases) { @@ -208,9 +232,12 @@ private void assertRetentionLeases( assertThat(idToRetentionLease.keySet(), hasItem(Integer.toString(i))); final RetentionLease retentionLease = idToRetentionLease.get(Integer.toString(i)); assertThat(retentionLease.retainingSequenceNumber(), equalTo(minimumRetainingSequenceNumbers[i])); - assertThat( - currentTimeMillisSupplier.getAsLong() - retentionLease.timestamp(), - lessThanOrEqualTo(indexShard.indexSettings().getRetentionLeaseMillis())); + if (primary) { + // retention leases can be expired on replicas, so we can only assert on primaries here + assertThat( + currentTimeMillisSupplier.getAsLong() - retentionLease.timestamp(), + lessThanOrEqualTo(indexShard.indexSettings().getRetentionLeaseMillis())); + } assertThat(retentionLease.source(), equalTo("test-" + i)); } } From 64b98db9736cc42a4a4ba80704684d0be468fe13 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 28 Jan 2019 14:26:22 +0200 Subject: [PATCH 27/57] Add an alias for :server:integTest so it runs as part of internalClusterTest (#37910) --- server/build.gradle | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/server/build.gradle b/server/build.gradle index 5e2ae5939dad0..4fc8c451c9b96 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -339,4 +339,8 @@ if (isEclipse == false || project.path == ":server-tests") { } check.dependsOn integTest integTest.mustRunAfter test + task internalClusterTest { + dependsOn integTest + } } + From a9adc16922c45d57f84c473658b4013e7d15131a Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 28 Jan 2019 13:41:05 +0100 Subject: [PATCH 28/57] Mute failing SearchQueryIT test Relates to #37814 --- .../test/java/org/elasticsearch/search/query/SearchQueryIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index b7d51798ab7df..58302428848b3 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -426,6 +426,7 @@ public void testDateRangeInQueryString() { assertThat(e.toString(), containsString("unit [D] not supported for date math")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37814") // Issue #7880 public void testDateRangeInQueryStringWithTimeZone_7880() { //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back From 
445db97867fd032aecfcd753ffb47a9aaa6ada93 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Jan 2019 13:55:53 +0100 Subject: [PATCH 29/57] Each full cluster restart round should use its own repository, otherwise snapshots from e.g. the 6.5.5 to current and 6.6.0 to current full cluster restart rounds collide. --- qa/full-cluster-restart/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 39bec5ac0b3df..cd5e836757c35 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -39,7 +39,7 @@ for (Version version : bwcVersions.indexCompatible) { tasks.getByName("${baseName}#oldClusterTestRunner").configure { systemProperty 'tests.is_old_cluster', 'true' systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo/" + baseName) } Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") From 519423b8f39364c1ff78c4c9c98095e77572800d Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 28 Jan 2019 14:07:12 +0100 Subject: [PATCH 30/57] Mute failing full-cluster-restart tests Relates to #37920 --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 4 ++++ .../java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java | 1 + 2 files changed, 5 insertions(+) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 46038da21dfc0..750e1612575f0 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -85,6 +85,7 @@ public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { @@ -152,6 +153,7 @@ public void testSearch() throws Exception { assertStoredBinaryFields(count); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testNewReplicasWork() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -330,6 +332,7 @@ public void testClusterState() throws Exception { } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; @@ -398,6 +401,7 @@ public void testShrink() throws IOException { assertEquals(numDocs, totalHits); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index c3cd8f61538fe..158d5dfda5844 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -142,6 +142,7 @@
private static void addCandidate(String querySource, QueryBuilder expectedQb) { CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testQueryBuilderBWC() throws Exception { String index = "queries"; if (isRunningAgainstOldCluster()) { From 0d109396faac0dd41b482196c822a4b9886552a0 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 28 Jan 2019 14:13:02 +0100 Subject: [PATCH 31/57] Increase Timeout in #testSnapshotCanceled (#37890) * The test failure reported in the issue looks like a mere timeout. Logging suggests that the snapshot completes/aborts correctly but the busy loop polling the snapshot state times out too early. * Closes #37888 --- .../snapshots/SharedClusterSnapshotRestoreIT.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index a4d4c31517a7e..1a1b886e0e373 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3179,7 +3179,6 @@ public void testGetSnapshotsRequest() throws Exception { * * See https://github.com/elastic/elasticsearch/issues/20876 */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37888") public void testSnapshotCanceledOnRemovedShard() throws Exception { final int numPrimaries = 1; final int numReplicas = 1; @@ -3229,7 +3228,7 @@ public void testSnapshotCanceledOnRemovedShard() throws Exception { unblockNode(repo, blockedNode); logger.info("--> ensuring snapshot is aborted and the aborted shard was marked as failed"); - SnapshotInfo snapshotInfo = waitForCompletion(repo, snapshot, TimeValue.timeValueSeconds(10)); + SnapshotInfo snapshotInfo = waitForCompletion(repo, snapshot, TimeValue.timeValueSeconds(60)); assertEquals(1, snapshotInfo.shardFailures().size()); assertEquals(0, snapshotInfo.shardFailures().get(0).shardId()); assertEquals("IndexShardSnapshotFailedException[Aborted]", snapshotInfo.shardFailures().get(0).reason()); From e401ab172491a64f0c97c9c4ebf1626e3b908bba Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 28 Jan 2019 15:02:50 +0100 Subject: [PATCH 32/57] Handle deprecation warnings in a permissive manner.
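These BWC tests create indices with mappings that are deprecated on the current version, so the responses carry deprecation warning headers which the test REST client otherwise treats as request failures. Instead of asserting on the exact warning text, each affected request opts out individually. A minimal sketch of the pattern applied throughout the diffs below (assuming the low-level REST client types Request, RequestOptions and WarningsHandler that these tests already import):

Request createIndex = new Request("PUT", "/" + index);
createIndex.setJsonEntity(Strings.toString(mappingsAndSettings));
// The default warnings handler fails a request whose response carries warning
// headers; PERMISSIVE accepts them so the old-style mappings can be created.
RequestOptions.Builder options = createIndex.getOptions().toBuilder();
options.setWarningsHandler(WarningsHandler.PERMISSIVE);
createIndex.setOptions(options);
client().performRequest(createIndex);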
Closes #37920 --- .../upgrades/FullClusterRestartIT.java | 18 ++++++++++++++---- .../upgrades/QueryBuilderBWCIT.java | 6 +++++- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 750e1612575f0..43bb7401dba58 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -22,9 +22,11 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.CheckedFunction; @@ -85,7 +87,6 @@ public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { @@ -124,6 +125,9 @@ public void testSearch() throws Exception { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + RequestOptions.Builder options = createIndex.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + createIndex.setOptions(options); client().performRequest(createIndex); count = randomIntBetween(2000, 3000); @@ -153,7 +157,6 @@ public void testSearch() throws Exception { assertStoredBinaryFields(count); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testNewReplicasWork() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -180,6 +183,9 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + RequestOptions.Builder options = createIndex.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + createIndex.setOptions(options); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -332,7 +338,6 @@ public void testClusterState() throws Exception { } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; @@ -355,6 +360,9 @@ public void testShrink() throws IOException { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + RequestOptions.Builder options = createIndex.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + createIndex.setOptions(options); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -401,7 +409,6 @@ public void testShrink() throws IOException { assertEquals(numDocs, totalHits); } - @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/37920") public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; @@ -424,6 +431,9 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + RequestOptions.Builder options = createIndex.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + createIndex.setOptions(options); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index 158d5dfda5844..10bdcc234c656 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -21,7 +21,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -142,7 +144,6 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37920") public void testQueryBuilderBWC() throws Exception { String index = "queries"; if (isRunningAgainstOldCluster()) { @@ -179,6 +180,9 @@ public void testQueryBuilderBWC() throws Exception { } mappingsAndSettings.endObject(); Request request = new Request("PUT", "/" + index); + RequestOptions.Builder options = request.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + request.setOptions(options); request.setJsonEntity(Strings.toString(mappingsAndSettings)); Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); From 7e4c0e699171d8dd93b0bf0d2e6939231cd67683 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 28 Jan 2019 09:07:30 -0600 Subject: [PATCH 33/57] ML: Adds set_upgrade_mode API endpoint (#37837) * ML: Add MlMetadata.upgrade_mode and API * Adding tests * Adding wait conditionals for the upgrade_mode call to return * Adding tests * adjusting format and tests * Adjusting wait conditions for api return and msgs * adjusting doc tests * adding upgrade mode tests to black list --- docs/reference/ml/apis/get-ml-info.asciidoc | 1 + .../xpack/core/XPackClientPlugin.java | 2 + .../xpack/core/ml/MlMetadata.java | 48 +++- .../elasticsearch/xpack/core/ml/MlTasks.java | 8 +- .../core/ml/action/SetUpgradeModeAction.java | 115 ++++++++ .../SetUpgradeModeActionRequestTests.java | 34 +++ .../authz/store/ReservedRolesStoreTests.java | 3 + .../ml/qa/ml-with-security/build.gradle | 6 +- .../ml/integration/SetUpgradeModeIT.java | 173 +++++++++++ .../xpack/ml/MachineLearning.java | 11 +- .../ml/action/TransportMlInfoAction.java | 6 + .../ml/action/TransportOpenJobAction.java | 36 ++- .../action/TransportSetUpgradeModeAction.java | 271 ++++++++++++++++++ .../ml/datafeed/DatafeedNodeSelector.java | 16 ++ 
.../autodetect/AutodetectCommunicator.java | 13 +- .../autodetect/AutodetectProcessManager.java | 40 ++- .../process/autodetect/ProcessContext.java | 8 +- .../ml/rest/RestSetUpgradeModeAction.java | 49 ++++ .../datafeed/DatafeedNodeSelectorTests.java | 37 +++ .../AutodetectCommunicatorTests.java | 8 +- .../AutodetectProcessManagerTests.java | 61 ++-- .../api/ml.set_upgrade_mode.json | 21 ++ .../rest-api-spec/test/ml/ml_info.yml | 3 + .../test/ml/set_upgrade_mode.yml | 212 ++++++++++++++ 24 files changed, 1112 insertions(+), 70 deletions(-) create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeActionRequestTests.java create mode 100644 x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java create mode 100644 x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json create mode 100644 x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml diff --git a/docs/reference/ml/apis/get-ml-info.asciidoc b/docs/reference/ml/apis/get-ml-info.asciidoc index f692ede711f02..67484c0073179 100644 --- a/docs/reference/ml/apis/get-ml-info.asciidoc +++ b/docs/reference/ml/apis/get-ml-info.asciidoc @@ -54,6 +54,7 @@ This is a possible response: "scroll_size" : 1000 } }, + "upgrade_mode": false, "limits" : { } } ---- diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 25b745c4f499a..1b6f128318a2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -108,6 +108,7 @@ import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; @@ -291,6 +292,7 @@ public List> getClientActions() { PostCalendarEventsAction.INSTANCE, PersistJobAction.INSTANCE, FindFileStructureAction.INSTANCE, + SetUpgradeModeAction.INSTANCE, // security ClearRealmCacheAction.INSTANCE, ClearRolesCacheAction.INSTANCE, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index 643ba9101a068..43462c552da63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -48,8 +48,9 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { public static final String TYPE = "ml"; private static final ParseField JOBS_FIELD = new ParseField("jobs"); private static final ParseField DATAFEEDS_FIELD = new ParseField("datafeeds"); + public 
static final ParseField UPGRADE_MODE = new ParseField("upgrade_mode"); - public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap()); + public static final MlMetadata EMPTY_METADATA = new MlMetadata(Collections.emptySortedMap(), Collections.emptySortedMap(), false); // This parser follows the pattern that metadata is parsed leniently (to allow for enhancements) public static final ObjectParser LENIENT_PARSER = new ObjectParser<>("ml_metadata", true, Builder::new); @@ -57,16 +58,20 @@ public class MlMetadata implements XPackPlugin.XPackMetaDataCustom { LENIENT_PARSER.declareObjectArray(Builder::putJobs, (p, c) -> Job.LENIENT_PARSER.apply(p, c).build(), JOBS_FIELD); LENIENT_PARSER.declareObjectArray(Builder::putDatafeeds, (p, c) -> DatafeedConfig.LENIENT_PARSER.apply(p, c).build(), DATAFEEDS_FIELD); + LENIENT_PARSER.declareBoolean(Builder::isUpgradeMode, UPGRADE_MODE); + } private final SortedMap jobs; private final SortedMap datafeeds; + private final boolean upgradeMode; private final GroupOrJobLookup groupOrJobLookup; - private MlMetadata(SortedMap jobs, SortedMap datafeeds) { + private MlMetadata(SortedMap jobs, SortedMap datafeeds, boolean upgradeMode) { this.jobs = Collections.unmodifiableSortedMap(jobs); this.datafeeds = Collections.unmodifiableSortedMap(datafeeds); this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); + this.upgradeMode = upgradeMode; } public Map getJobs() { @@ -94,6 +99,10 @@ public Set expandDatafeedIds(String expression, boolean allowNoDatafeeds .expand(expression, allowNoDatafeeds); } + public boolean isUpgradeMode() { + return upgradeMode; + } + @Override public Version getMinimalSupportedVersion() { return Version.V_6_0_0_alpha1; @@ -128,12 +137,20 @@ public MlMetadata(StreamInput in) throws IOException { } this.datafeeds = datafeeds; this.groupOrJobLookup = new GroupOrJobLookup(jobs.values()); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + this.upgradeMode = in.readBoolean(); + } else { + this.upgradeMode = false; + } } @Override public void writeTo(StreamOutput out) throws IOException { writeMap(jobs, out); writeMap(datafeeds, out); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(upgradeMode); + } } private static void writeMap(Map map, StreamOutput out) throws IOException { @@ -150,6 +167,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws new DelegatingMapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true"), params); mapValuesToXContent(JOBS_FIELD, jobs, builder, extendedParams); mapValuesToXContent(DATAFEEDS_FIELD, datafeeds, builder, extendedParams); + builder.field(UPGRADE_MODE.getPreferredName(), upgradeMode); return builder; } @@ -170,10 +188,12 @@ public static class MlMetadataDiff implements NamedDiff { final Diff> jobs; final Diff> datafeeds; + final boolean upgradeMode; MlMetadataDiff(MlMetadata before, MlMetadata after) { this.jobs = DiffableUtils.diff(before.jobs, after.jobs, DiffableUtils.getStringKeySerializer()); this.datafeeds = DiffableUtils.diff(before.datafeeds, after.datafeeds, DiffableUtils.getStringKeySerializer()); + this.upgradeMode = after.upgradeMode; } public MlMetadataDiff(StreamInput in) throws IOException { @@ -181,6 +201,11 @@ public MlMetadataDiff(StreamInput in) throws IOException { MlMetadataDiff::readJobDiffFrom); this.datafeeds = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), DatafeedConfig::new, MlMetadataDiff::readDatafeedDiffFrom); + 
if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + upgradeMode = in.readBoolean(); + } else { + upgradeMode = false; + } } /** @@ -192,13 +217,16 @@ public MlMetadataDiff(StreamInput in) throws IOException { public MetaData.Custom apply(MetaData.Custom part) { TreeMap newJobs = new TreeMap<>(jobs.apply(((MlMetadata) part).jobs)); TreeMap newDatafeeds = new TreeMap<>(datafeeds.apply(((MlMetadata) part).datafeeds)); - return new MlMetadata(newJobs, newDatafeeds); + return new MlMetadata(newJobs, newDatafeeds, upgradeMode); } @Override public void writeTo(StreamOutput out) throws IOException { jobs.writeTo(out); datafeeds.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(upgradeMode); + } } @Override @@ -223,7 +251,8 @@ public boolean equals(Object o) { return false; MlMetadata that = (MlMetadata) o; return Objects.equals(jobs, that.jobs) && - Objects.equals(datafeeds, that.datafeeds); + Objects.equals(datafeeds, that.datafeeds) && + Objects.equals(upgradeMode, that.upgradeMode); } @Override @@ -233,13 +262,14 @@ public final String toString() { @Override public int hashCode() { - return Objects.hash(jobs, datafeeds); + return Objects.hash(jobs, datafeeds, upgradeMode); } public static class Builder { private TreeMap jobs; private TreeMap datafeeds; + private boolean upgradeMode; public Builder() { jobs = new TreeMap<>(); @@ -253,6 +283,7 @@ public Builder(@Nullable MlMetadata previous) { } else { jobs = new TreeMap<>(previous.jobs); datafeeds = new TreeMap<>(previous.datafeeds); + upgradeMode = previous.upgradeMode; } } @@ -318,8 +349,13 @@ public Builder putDatafeeds(Collection datafeeds) { return this; } + public Builder isUpgradeMode(boolean upgradeMode) { + this.upgradeMode = upgradeMode; + return this; + } + public MlMetadata build() { - return new MlMetadata(jobs, datafeeds); + return new MlMetadata(jobs, datafeeds, upgradeMode); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index c166f64c4e3ef..cd32505a48e3e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -24,8 +24,12 @@ public final class MlTasks { public static final String JOB_TASK_NAME = "xpack/ml/job"; public static final String DATAFEED_TASK_NAME = "xpack/ml/datafeed"; - private static final String JOB_TASK_ID_PREFIX = "job-"; - private static final String DATAFEED_TASK_ID_PREFIX = "datafeed-"; + public static final String JOB_TASK_ID_PREFIX = "job-"; + public static final String DATAFEED_TASK_ID_PREFIX = "datafeed-"; + + public static final PersistentTasksCustomMetaData.Assignment AWAITING_UPGRADE = + new PersistentTasksCustomMetaData.Assignment(null, + "persistent task cannot be assigned while upgrade mode is enabled."); private MlTasks() { } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java new file mode 100644 index 0000000000000..ac9ae2b084498 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeAction.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class SetUpgradeModeAction extends Action { + + public static final SetUpgradeModeAction INSTANCE = new SetUpgradeModeAction(); + public static final String NAME = "cluster:admin/xpack/ml/upgrade_mode"; + + private SetUpgradeModeAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends AcknowledgedRequest implements ToXContentObject { + + private boolean enabled; + + private static final ParseField ENABLED = new ParseField("enabled"); + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, a -> new Request((Boolean)a[0])); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ENABLED); + } + + public Request(boolean enabled) { + this.enabled = enabled; + } + + public Request(StreamInput in) throws IOException { + readFrom(in); + } + + public Request() { + } + + public boolean isEnabled() { + return enabled; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.enabled = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(enabled); + } + + @Override + public int hashCode() { + return Objects.hash(enabled); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || obj.getClass() != getClass()) { + return false; + } + Request other = (Request) obj; + return Objects.equals(enabled, other.enabled); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ENABLED.getPreferredName(), enabled); + builder.endObject(); + return builder; + } + } + + static class RequestBuilder extends ActionRequestBuilder { + + RequestBuilder(ElasticsearchClient client, SetUpgradeModeAction action) { + super(client, action, new Request()); + } + } + +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeActionRequestTests.java new file mode 100644 index 0000000000000..8b2fd768f210a --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/SetUpgradeModeActionRequestTests.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ml.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction.Request; + +public class SetUpgradeModeActionRequestTests extends AbstractSerializingTestCase { + + @Override + protected Request createTestInstance() { + return new Request(randomBoolean()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected Writeable.Reader instanceReader() { + return Request::new; + } + + @Override + protected Request doParseInstance(XContentParser parser) { + return Request.PARSER.apply(parser, null); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index e6e1dd1d06825..8711a6c318e58 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -87,6 +87,7 @@ import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; @@ -868,6 +869,7 @@ public void testMachineLearningAdminRole() { assertThat(role.cluster().check(PutFilterAction.NAME, request), is(true)); assertThat(role.cluster().check(PutJobAction.NAME, request), is(true)); assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(true)); + assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request), is(true)); assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(true)); assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(true)); assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(true)); @@ -938,6 +940,7 @@ public void testMachineLearningUserRole() { assertThat(role.cluster().check(PutFilterAction.NAME, request), is(false)); assertThat(role.cluster().check(PutJobAction.NAME, request), is(false)); assertThat(role.cluster().check(RevertModelSnapshotAction.NAME, request), is(false)); + assertThat(role.cluster().check(SetUpgradeModeAction.NAME, request), is(false)); assertThat(role.cluster().check(StartDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(StopDatafeedAction.NAME, request), is(false)); assertThat(role.cluster().check(UpdateCalendarJobAction.NAME, request), is(false)); diff --git a/x-pack/plugin/ml/qa/ml-with-security/build.gradle b/x-pack/plugin/ml/qa/ml-with-security/build.gradle index abfed3fd878d0..43421b4591f0a 100644 --- a/x-pack/plugin/ml/qa/ml-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/ml-with-security/build.gradle @@ -93,7 +93,11 @@ integTestRunner { 
'ml/validate/Test job config that is invalid only because of the job ID', 'ml/validate_detector/Test invalid detector', 'ml/delete_forecast/Test delete on _all forecasts not allow no forecasts', - 'ml/delete_forecast/Test delete forecast on missing forecast' + 'ml/delete_forecast/Test delete forecast on missing forecast', + 'ml/set_upgrade_mode/Attempt to open job when upgrade_mode is enabled', + 'ml/set_upgrade_mode/Setting upgrade_mode to enabled', + 'ml/set_upgrade_mode/Setting upgrade mode to disabled from enabled', + 'ml/set_upgrade_mode/Test setting upgrade_mode to false when it is already false' ].join(',') } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java new file mode 100644 index 0000000000000..57c9245e2c5b3 --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -0,0 +1,173 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; +import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.job.config.Job; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; +import org.junit.After; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createDatafeed; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.createScheduledJob; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDataCounts; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.getDatafeedStats; +import static org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase.indexDocs; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.isEmptyString; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; + +public class SetUpgradeModeIT extends MlNativeAutodetectIntegTestCase { + + @After + public void cleanup() throws Exception { + cleanUp(); + } + + public void testEnableUpgradeMode() throws Exception { + String jobId = "realtime-job-test-enable-upgrade-mode"; + String datafeedId = jobId + "-datafeed"; + startRealtime(jobId); + + // Assert appropriate task state and assignment numbers + assertThat(client().admin() + .cluster() + 
.prepareListTasks() + .setActions(MlTasks.JOB_TASK_NAME + "[c]", MlTasks.DATAFEED_TASK_NAME + "[c]") + .get() + .getTasks() + .size(), equalTo(2)); + + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + + PersistentTasksCustomMetaData persistentTasks = masterClusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(MlMetadata.getMlMetadata(masterClusterState).isUpgradeMode(), equalTo(false)); + + // Set the upgrade mode setting + AcknowledgedResponse response = client().execute(SetUpgradeModeAction.INSTANCE, new SetUpgradeModeAction.Request(true)) + .actionGet(); + + assertThat(response.isAcknowledged(), equalTo(true)); + + masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + + // Assert state for tasks still exists and that the upgrade setting is set + persistentTasks = masterClusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(MlMetadata.getMlMetadata(masterClusterState).isUpgradeMode(), equalTo(true)); + + assertThat(client().admin() + .cluster() + .prepareListTasks() + .setActions(MlTasks.JOB_TASK_NAME + "[c]", MlTasks.DATAFEED_TASK_NAME + "[c]") + .get() + .getTasks(), is(empty())); + + GetJobsStatsAction.Response.JobStats jobStats = getJobStats(jobId).get(0); + assertThat(jobStats.getState(), equalTo(JobState.OPENED)); + assertThat(jobStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(jobStats.getNode(), is(nullValue())); + + GetDatafeedsStatsAction.Response.DatafeedStats datafeedStats = getDatafeedStats(datafeedId); + assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); + assertThat(datafeedStats.getAssignmentExplanation(), equalTo(MlTasks.AWAITING_UPGRADE.getExplanation())); + assertThat(datafeedStats.getNode(), is(nullValue())); + + Job.Builder job = createScheduledJob("job-should-not-open"); + registerJob(job); + putJob(job); + ElasticsearchStatusException statusException = expectThrows(ElasticsearchStatusException.class, () -> openJob(job.getId())); + assertThat(statusException.status(), equalTo(RestStatus.TOO_MANY_REQUESTS)); + assertThat(statusException.getMessage(), equalTo("Cannot open jobs when upgrade mode is enabled")); + + //Disable the setting + response = client().execute(SetUpgradeModeAction.INSTANCE, new SetUpgradeModeAction.Request(false)) + .actionGet(); + + assertThat(response.isAcknowledged(), equalTo(true)); + + masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + + persistentTasks = masterClusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true).size(), equalTo(1)); + assertThat(MlMetadata.getMlMetadata(masterClusterState).isUpgradeMode(), equalTo(false)); + + assertBusy(() -> assertThat(client().admin() + .cluster() + .prepareListTasks() + .setActions(MlTasks.JOB_TASK_NAME + "[c]", MlTasks.DATAFEED_TASK_NAME + "[c]") + .get() + 
.getTasks() + .size(), equalTo(2))); + + jobStats = getJobStats(jobId).get(0); + assertThat(jobStats.getState(), equalTo(JobState.OPENED)); + assertThat(jobStats.getAssignmentExplanation(), isEmptyString()); + assertThat(jobStats.getNode(), is(not(nullValue()))); + + datafeedStats = getDatafeedStats(datafeedId); + assertThat(datafeedStats.getDatafeedState(), equalTo(DatafeedState.STARTED)); + assertThat(datafeedStats.getAssignmentExplanation(), isEmptyString()); + assertThat(datafeedStats.getNode(), is(not(nullValue()))); + } + + private void startRealtime(String jobId) throws Exception { + client().admin().indices().prepareCreate("data") + .addMapping("type", "time", "type=date") + .get(); + long numDocs1 = randomIntBetween(32, 2048); + long now = System.currentTimeMillis(); + long lastWeek = now - 604800000; + indexDocs(logger, "data", numDocs1, lastWeek, now); + + Job.Builder job = createScheduledJob(jobId); + registerJob(job); + putJob(job); + openJob(job.getId()); + assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); + + DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data")); + registerDatafeed(datafeedConfig); + putDatafeed(datafeedConfig); + startDatafeed(datafeedConfig.getId(), 0L, null); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1)); + assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); + }); + + long numDocs2 = randomIntBetween(2, 64); + now = System.currentTimeMillis(); + indexDocs(logger, "data", numDocs2, now + 5000, now + 6000); + assertBusy(() -> { + DataCounts dataCounts = getDataCounts(job.getId()); + assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1 + numDocs2)); + assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); + }, 30, TimeUnit.SECONDS); + } + +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 39316389b0496..e5376bccb1745 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -97,6 +97,7 @@ import org.elasticsearch.xpack.core.ml.action.PutFilterAction; import org.elasticsearch.xpack.core.ml.action.PutJobAction; import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction; import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction; @@ -152,6 +153,7 @@ import org.elasticsearch.xpack.ml.action.TransportPutFilterAction; import org.elasticsearch.xpack.ml.action.TransportPutJobAction; import org.elasticsearch.xpack.ml.action.TransportRevertModelSnapshotAction; +import org.elasticsearch.xpack.ml.action.TransportSetUpgradeModeAction; import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportStopDatafeedAction; import org.elasticsearch.xpack.ml.action.TransportUpdateCalendarJobAction; @@ -190,6 +192,7 @@ import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction; import org.elasticsearch.xpack.ml.rest.RestFindFileStructureAction; import org.elasticsearch.xpack.ml.rest.RestMlInfoAction; 
+import org.elasticsearch.xpack.ml.rest.RestSetUpgradeModeAction; import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarAction; import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarEventAction; import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarJobAction; @@ -425,7 +428,7 @@ public Collection createComponents(Client client, ClusterService cluster threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)); AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(env, settings, client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, - normalizerFactory, xContentRegistry, auditor); + normalizerFactory, xContentRegistry, auditor, clusterService); this.autodetectProcessManager.set(autodetectProcessManager); DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, settings, xContentRegistry, auditor, System::currentTimeMillis); @@ -542,7 +545,8 @@ public List getRestHandlers(Settings settings, RestController restC new RestPutCalendarJobAction(settings, restController), new RestGetCalendarEventsAction(settings, restController), new RestPostCalendarEventAction(settings, restController), - new RestFindFileStructureAction(settings, restController) + new RestFindFileStructureAction(settings, restController), + new RestSetUpgradeModeAction(settings, restController) ); } @@ -600,7 +604,8 @@ public List getRestHandlers(Settings settings, RestController restC new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class), new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class), new ActionHandler<>(PersistJobAction.INSTANCE, TransportPersistJobAction.class), - new ActionHandler<>(FindFileStructureAction.INSTANCE, TransportFindFileStructureAction.class) + new ActionHandler<>(FindFileStructureAction.INSTANCE, TransportFindFileStructureAction.class), + new ActionHandler<>(SetUpgradeModeAction.INSTANCE, TransportSetUpgradeModeAction.class) ); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java index fa7f96236af26..48fbf9566fd1e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlInfoAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.MlInfoAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; @@ -39,6 +40,7 @@ protected void doExecute(Task task, MlInfoAction.Request request, ActionListener Map info = new HashMap<>(); info.put("defaults", defaults()); info.put("limits", limits()); + info.put(MlMetadata.UPGRADE_MODE.getPreferredName(), upgradeMode()); listener.onResponse(new MlInfoAction.Response(info)); } @@ -49,6 +51,10 @@ private Map defaults() { return defaults; } + private boolean upgradeMode() { + return MlMetadata.getMlMetadata(clusterService.state()).isUpgradeMode(); + } + private Map anomalyDetectorsDefaults() { Map defaults = new HashMap<>(); 
defaults.put(AnalysisLimits.MODEL_MEMORY_LIMIT.getPreferredName(), defaultModelMemoryLimit()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 36381a5837864..d365733eac0b4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -40,6 +40,7 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.ml.MlMetaIndex; +import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -66,6 +67,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; import static org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE; /* @@ -550,6 +552,7 @@ public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecut private final AutodetectProcessManager autodetectProcessManager; private final MlMemoryTracker memoryTracker; private final Client client; + private final ClusterService clusterService; private volatile int maxConcurrentJobAllocations; private volatile int maxMachineMemoryPercent; @@ -566,6 +569,7 @@ public OpenJobPersistentTasksExecutor(Settings settings, ClusterService clusterS this.maxConcurrentJobAllocations = MachineLearning.CONCURRENT_JOB_ALLOCATIONS.get(settings); this.maxMachineMemoryPercent = MachineLearning.MAX_MACHINE_MEMORY_PERCENT.get(settings); this.maxLazyMLNodes = MachineLearning.MAX_LAZY_ML_NODES.get(settings); + this.clusterService = clusterService; clusterService.getClusterSettings() .addSettingsUpdateConsumer(MachineLearning.CONCURRENT_JOB_ALLOCATIONS, this::setMaxConcurrentJobAllocations); clusterService.getClusterSettings() @@ -583,6 +587,11 @@ public PersistentTasksCustomMetaData.Assignment getAssignment(OpenJobAction.JobP return AWAITING_MIGRATION; } + // If we are waiting for an upgrade to complete, we should not assign to a node + if (MlMetadata.getMlMetadata(clusterState).isUpgradeMode()) { + return AWAITING_UPGRADE; + } + PersistentTasksCustomMetaData.Assignment assignment = selectLeastLoadedMlNode(params.getJobId(), params.getJob(), clusterState, @@ -613,6 +622,10 @@ public void validate(OpenJobAction.JobParams params, ClusterState clusterState) // If we already know that we can't find an ml node because all ml nodes are running at capacity or // simply because there are no ml nodes in the cluster then we fail quickly here: PersistentTasksCustomMetaData.Assignment assignment = getAssignment(params, clusterState); + if (assignment.equals(AWAITING_UPGRADE)) { + throw makeCurrentlyBeingUpgradedException(logger, params.getJobId(), assignment.getExplanation()); + } + if (assignment.getExecutorNode() == null && assignment.equals(AWAITING_LAZY_ASSIGNMENT) == false) { throw makeNoSuitableNodesException(logger, params.getJobId(), assignment.getExplanation()); } @@ -631,14 +644,18 @@ protected void nodeOperation(AllocatedPersistentTask task, OpenJobAction.JobPara } String jobId = jobTask.getJobId(); - 
autodetectProcessManager.openJob(jobTask, clusterState, e2 -> { + autodetectProcessManager.openJob(jobTask, clusterState, (e2, shouldFinalizeJob) -> { if (e2 == null) { - FinalizeJobExecutionAction.Request finalizeRequest = new FinalizeJobExecutionAction.Request(new String[]{jobId}); - executeAsyncWithOrigin(client, ML_ORIGIN, FinalizeJobExecutionAction.INSTANCE, finalizeRequest, + if (shouldFinalizeJob) { + FinalizeJobExecutionAction.Request finalizeRequest = new FinalizeJobExecutionAction.Request(new String[]{jobId}); + executeAsyncWithOrigin(client, ML_ORIGIN, FinalizeJobExecutionAction.INSTANCE, finalizeRequest, ActionListener.wrap( - response -> task.markAsCompleted(), - e -> logger.error("error finalizing job [" + jobId + "]", e) + response -> task.markAsCompleted(), + e -> logger.error("error finalizing job [" + jobId + "]", e) )); + } else { + task.markAsCompleted(); + } } else { task.markAsFailed(e2); } @@ -649,7 +666,7 @@ protected void nodeOperation(AllocatedPersistentTask task, OpenJobAction.JobPara protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { - return new JobTask(persistentTask.getParams().getJobId(), id, type, action, parentTaskId, headers); + return new JobTask(persistentTask.getParams().getJobId(), id, type, action, parentTaskId, headers); } void setMaxConcurrentJobAllocations(int maxConcurrentJobAllocations) { @@ -701,7 +718,6 @@ void killJob(String reason) { void closeJob(String reason) { autodetectProcessManager.closeJob(this, false, reason); } - } /** @@ -772,4 +788,10 @@ static ElasticsearchException makeNoSuitableNodesException(Logger logger, String return new ElasticsearchStatusException("Could not open job because no ML nodes with sufficient capacity were found", RestStatus.TOO_MANY_REQUESTS, detail); } + + static ElasticsearchException makeCurrentlyBeingUpgradedException(Logger logger, String jobId, String explanation) { + String msg = "Cannot open jobs when upgrade mode is enabled"; + logger.warn("[{}] {}", jobId, msg); + return new ElasticsearchStatusException(msg, RestStatus.TOO_MANY_REQUESTS); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java new file mode 100644 index 0000000000000..1834b0b3c0616 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -0,0 +1,271 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.action; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.persistent.PersistentTasksClusterService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; +import org.elasticsearch.xpack.ml.utils.TypedChainTaskExecutor; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE; +import static org.elasticsearch.xpack.core.ml.MlTasks.DATAFEED_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_TASK_NAME; + +public class TransportSetUpgradeModeAction extends TransportMasterNodeAction { + + private final AtomicBoolean isRunning = new AtomicBoolean(false); + private final PersistentTasksClusterService persistentTasksClusterService; + private final PersistentTasksService persistentTasksService; + private final ClusterService clusterService; + private final Client client; + + @Inject + public TransportSetUpgradeModeAction(TransportService transportService, ThreadPool threadPool, ClusterService clusterService, + PersistentTasksClusterService persistentTasksClusterService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, Client client, + PersistentTasksService persistentTasksService) { + super(SetUpgradeModeAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + SetUpgradeModeAction.Request::new); + this.persistentTasksClusterService = persistentTasksClusterService; + this.clusterService = clusterService; + this.client = client; + this.persistentTasksService = persistentTasksService; + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + @Override + protected void masterOperation(SetUpgradeModeAction.Request request, ClusterState state, ActionListener 
<AcknowledgedResponse> listener)
+        throws Exception {
+
+        // Don't want folks spamming this endpoint while it is in progress, only allow one request to be handled at a time
+        if (isRunning.compareAndSet(false, true) == false) {
+            String msg = "Attempted to set [upgrade_mode] to [" +
+                request.isEnabled() + "] from [" + MlMetadata.getMlMetadata(state).isUpgradeMode() +
+                "] while previous request was processing.";
+            Exception detail = new IllegalStateException(msg);
+            listener.onFailure(new ElasticsearchStatusException(
+                "Cannot change [upgrade_mode]. Previous request is still being processed.",
+                RestStatus.TOO_MANY_REQUESTS,
+                detail));
+            return;
+        }
+
+        // Noop, nothing for us to do, simply return fast to the caller
+        if (request.isEnabled() == MlMetadata.getMlMetadata(state).isUpgradeMode()) {
+            isRunning.set(false);
+            listener.onResponse(new AcknowledgedResponse(true));
+            return;
+        }
+
+        ActionListener<AcknowledgedResponse> wrappedListener = ActionListener.wrap(
+            r -> {
+                isRunning.set(false);
+                listener.onResponse(r);
+            },
+            e -> {
+                isRunning.set(false);
+                listener.onFailure(e);
+            }
+        );
+        final PersistentTasksCustomMetaData tasksCustomMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE);
+
+        // <4> We have unassigned the tasks, respond to the listener.
+        ActionListener<List<PersistentTask<?>>> unassignPersistentTasksListener = ActionListener.wrap(
+            unassignedPersistentTasks -> {
+                // Wait for our tasks to all stop
+                client.admin()
+                    .cluster()
+                    .prepareListTasks()
+                    .setActions(DATAFEED_TASK_NAME + "[c]", JOB_TASK_NAME + "[c]")
+                    .setWaitForCompletion(true)
+                    .setTimeout(request.timeout()).execute(ActionListener.wrap(
+                        r -> wrappedListener.onResponse(new AcknowledgedResponse(true)),
+                        wrappedListener::onFailure));
+            },
+            wrappedListener::onFailure
+        );
+
+        // <3> After isolating the datafeeds, unassign the tasks
+        ActionListener<List<IsolateDatafeedAction.Response>> isolateDatafeedListener = ActionListener.wrap(
+            isolatedDatafeeds -> unassignPersistentTasks(tasksCustomMetaData, unassignPersistentTasksListener),
+            wrappedListener::onFailure
+        );
+
+        /*
+            <2> Handle the cluster response and act accordingly
+             <.1>
+                 If we are enabling the option, we need to isolate the datafeeds so we can unassign the ML Jobs
+             </.1>
+             <.2>
+                 If we are disabling the option, we need to wait to make sure all the job and datafeed tasks no longer have the upgrade mode
+                 assignment
+             </.2>
+
+            We make no guarantees around which tasks will be running again once upgrade_mode is disabled.
+            Scenario:
+                * Before `upgrade_mode: true`, there were unassigned tasks because node task assignment was maxed out (tasks A, B)
+                * There were assigned tasks executing fine (tasks C, D)
+                * While `upgrade_mode: true` all are attempting to be re-assigned, but cannot and end up with the AWAITING_UPGRADE reason
+                * `upgrade_mode: false` opens the flood gates, all tasks are still attempting to re-assign
+                * A or B could be re-assigned before either C or D. Thus, previously erred tasks are now executing fine, and previously
+                  executing tasks are now unassigned due to resource exhaustion.
+
+            We make no promises which tasks are executing if resources of the cluster are exhausted.
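+
+            To make the scenario concrete (an illustrative ordering only, not behaviour this action guarantees):
+            with node capacity for two jobs, C and D were the two running before the upgrade. Once
+            `upgrade_mode: false` is set, A and B may claim the two slots first, leaving C and D unassigned
+            until capacity frees up again.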
+ + + */ + ActionListener clusterStateUpdateListener = ActionListener.wrap( + acknowledgedResponse -> { + // State change was not acknowledged, we either timed out or ran into some exception + // We should not continue and alert failure to the end user + if (acknowledgedResponse.isAcknowledged() == false) { + wrappedListener.onFailure(new ElasticsearchTimeoutException("Unknown error occurred while updating cluster state")); + return; + } + // Did we change from disabled -> enabled? + if (request.isEnabled()) { + isolateDatafeeds(tasksCustomMetaData, isolateDatafeedListener); + } else { + persistentTasksService.waitForPersistentTasksCondition( + (persistentTasksCustomMetaData) -> + // Wait for jobs to not be "Awaiting upgrade" + persistentTasksCustomMetaData.findTasks(JOB_TASK_NAME, + (t) -> t.getAssignment().equals(AWAITING_UPGRADE)) + .isEmpty() && + + // Datafeeds to wait for a non-"Awaiting upgrade" assignment and for the job task allocations to converge + // If we do not wait, deleting datafeeds, or attempting to unallocate them again causes issues as the + // job's task allocationId could have changed during either process. + persistentTasksCustomMetaData.findTasks(DATAFEED_TASK_NAME, + (t) -> + t.getAssignment().equals(AWAITING_UPGRADE) || + t.getAssignment().getExplanation().contains("state is stale")) + .isEmpty(), + request.timeout(), + ActionListener.wrap(r -> wrappedListener.onResponse(new AcknowledgedResponse(true)), wrappedListener::onFailure) + ); + } + }, + wrappedListener::onFailure + ); + + //<1> Change MlMetadata to indicate that upgrade_mode is now enabled + clusterService.submitStateUpdateTask("ml-set-upgrade-mode", + new AckedClusterStateUpdateTask(request, clusterStateUpdateListener) { + + @Override + protected AcknowledgedResponse newResponse(boolean acknowledged) { + return new AcknowledgedResponse(acknowledged); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metaData().custom(MlMetadata.TYPE)); + builder.isUpgradeMode(request.isEnabled()); + ClusterState.Builder newState = ClusterState.builder(currentState); + newState.metaData(MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, builder.build()).build()); + return newState.build(); + } + }); + } + + @Override + protected ClusterBlockException checkBlock(SetUpgradeModeAction.Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + /** + * Unassigns all Job and Datafeed tasks. + *
<p>
+ * The reason for unassigning both types is that we want the Datafeed to attempt re-assignment once `upgrade_mode` is + * disabled. + *
<p>
+ * If we do not force an allocation change for the Datafeed tasks, they will never start again, since they were isolated. + *
<p>
+     * Datafeed tasks keep the state as `started` and Jobs stay `opened`
+     *
+     * @param tasksCustomMetaData Current state of persistent tasks
+     * @param listener            Alerted when tasks are unassigned
+     */
+    private void unassignPersistentTasks(PersistentTasksCustomMetaData tasksCustomMetaData,
+                                         ActionListener<List<PersistentTask<?>>> listener) {
+        List<PersistentTask<?>> datafeedAndJobTasks = tasksCustomMetaData
+            .tasks()
+            .stream()
+            .filter(persistentTask -> (persistentTask.getTaskName().equals(MlTasks.JOB_TASK_NAME) ||
+                persistentTask.getTaskName().equals(MlTasks.DATAFEED_TASK_NAME)))
+            .collect(Collectors.toList());
+
+        TypedChainTaskExecutor<PersistentTask<?>> chainTaskExecutor =
+            new TypedChainTaskExecutor<>(client.threadPool().executor(executor()), r -> true, ex -> true);
+
+        for (PersistentTask<?> task : datafeedAndJobTasks) {
+            chainTaskExecutor.add(
+                chainedTask -> persistentTasksClusterService.unassignPersistentTask(task.getId(),
+                    task.getAllocationId(),
+                    AWAITING_UPGRADE.getExplanation(),
+                    chainedTask)
+            );
+        }
+        chainTaskExecutor.execute(listener);
+    }
+
+    private void isolateDatafeeds(PersistentTasksCustomMetaData tasksCustomMetaData,
+                                  ActionListener<List<IsolateDatafeedAction.Response>> listener) {
+        Set<String> datafeedsToIsolate = MlTasks.startedDatafeedIds(tasksCustomMetaData);
+
+        TypedChainTaskExecutor<IsolateDatafeedAction.Response> isolateDatafeedsExecutor =
+            new TypedChainTaskExecutor<>(client.threadPool().executor(executor()), r -> true, ex -> true);
+
+        datafeedsToIsolate.forEach(datafeedId -> {
+            IsolateDatafeedAction.Request isolationRequest = new IsolateDatafeedAction.Request(datafeedId);
+            isolateDatafeedsExecutor.add(isolateListener ->
+                executeAsyncWithOrigin(client, ML_ORIGIN, IsolateDatafeedAction.INSTANCE, isolationRequest, isolateListener)
+            );
+        });
+
+        isolateDatafeedsExecutor.execute(listener);
+    }
+}
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java
index b1ee1776bfe25..e29fc8ef5c326 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java
@@ -7,6 +7,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -14,6 +15,8 @@
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.license.RemoteClusterLicenseChecker;
 import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.MlTasks;
 import org.elasticsearch.xpack.core.ml.job.config.JobState;
 import org.elasticsearch.xpack.core.ml.job.config.JobTaskState;
@@ -22,6 +25,8 @@
 import java.util.List;
 import java.util.Objects;
+
+import static org.elasticsearch.xpack.core.ml.MlTasks.AWAITING_UPGRADE;
 
 public class DatafeedNodeSelector {
 
     private static final Logger LOGGER = LogManager.getLogger(DatafeedNodeSelector.class);
@@ -45,6 +50,13 @@ public DatafeedNodeSelector(ClusterState clusterState, IndexNameExpressionResolv
     }
 
     public void checkDatafeedTaskCanBeCreated() {
+        if (MlMetadata.getMlMetadata(clusterState).isUpgradeMode()) {
+            String msg = "Unable to start datafeed [" + datafeedId +"] explanation [" + AWAITING_UPGRADE.getExplanation() +
"]"; + LOGGER.debug(msg); + Exception detail = new IllegalStateException(msg); + throw new ElasticsearchStatusException("Could not start datafeed [" + datafeedId +"] as indices are being upgraded", + RestStatus.TOO_MANY_REQUESTS, detail); + } AssignmentFailure assignmentFailure = checkAssignment(); if (assignmentFailure != null && assignmentFailure.isCriticalForTaskCreation) { String msg = "No node found to start datafeed [" + datafeedId + "], " + @@ -55,6 +67,10 @@ public void checkDatafeedTaskCanBeCreated() { } public PersistentTasksCustomMetaData.Assignment selectNode() { + if (MlMetadata.getMlMetadata(clusterState).isUpgradeMode()) { + return AWAITING_UPGRADE; + } + AssignmentFailure assignmentFailure = checkAssignment(); if (assignmentFailure == null) { return new PersistentTasksCustomMetaData.Assignment(jobTask.getExecutorNode(), ""); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java index c58e0e177a6b0..b3f765d89ce1a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java @@ -54,7 +54,6 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; -import java.util.function.Consumer; public class AutodetectCommunicator implements Closeable { @@ -67,7 +66,7 @@ public class AutodetectCommunicator implements Closeable { private final StateStreamer stateStreamer; private final DataCountsReporter dataCountsReporter; private final AutoDetectResultProcessor autoDetectResultProcessor; - private final Consumer onFinishHandler; + private final BiConsumer onFinishHandler; private final ExecutorService autodetectWorkerExecutor; private final NamedXContentRegistry xContentRegistry; private final boolean includeTokensField; @@ -76,7 +75,7 @@ public class AutodetectCommunicator implements Closeable { AutodetectCommunicator(Job job, Environment environment, AutodetectProcess process, StateStreamer stateStreamer, DataCountsReporter dataCountsReporter, AutoDetectResultProcessor autoDetectResultProcessor, - Consumer onFinishHandler, NamedXContentRegistry xContentRegistry, + BiConsumer onFinishHandler, NamedXContentRegistry xContentRegistry, ExecutorService autodetectWorkerExecutor) { this.job = job; this.environment = environment; @@ -160,7 +159,7 @@ public void close(boolean restart, String reason) { } autoDetectResultProcessor.awaitCompletion(); } finally { - onFinishHandler.accept(restart ? new ElasticsearchException(reason) : null); + onFinishHandler.accept(restart ? 
new ElasticsearchException(reason) : null, true); } LOGGER.info("[{}] job closed", job.getId()); return null; @@ -183,6 +182,10 @@ public void close(boolean restart, String reason) { } public void killProcess(boolean awaitCompletion, boolean finish) throws IOException { + killProcess(awaitCompletion, finish, true); + } + + public void killProcess(boolean awaitCompletion, boolean finish, boolean finalizeJob) throws IOException { try { processKilled = true; autoDetectResultProcessor.setProcessKilled(); @@ -198,7 +201,7 @@ public void killProcess(boolean awaitCompletion, boolean finish) throws IOExcept } } finally { if (finish) { - onFinishHandler.accept(null); + onFinishHandler.accept(null, finalizeJob); } destroyCategorizationAnalyzer(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index dd3656ee04b67..21aa08e14f217 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -11,7 +11,10 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; @@ -32,6 +35,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.GetFiltersAction; import org.elasticsearch.xpack.core.ml.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.calendars.ScheduledEvent; @@ -93,7 +97,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -public class AutodetectProcessManager { +public class AutodetectProcessManager implements ClusterStateListener { // We should be able from the job config to estimate the memory/cpu a job needs to have, // and if we know that then we can prior to assigning a job to a node fail based on the @@ -135,11 +139,13 @@ public class AutodetectProcessManager { private final Auditor auditor; + private volatile boolean upgradeInProgress; + public AutodetectProcessManager(Environment environment, Settings settings, Client client, ThreadPool threadPool, JobManager jobManager, JobResultsProvider jobResultsProvider, JobResultsPersister jobResultsPersister, JobDataCountsPersister jobDataCountsPersister, AutodetectProcessFactory autodetectProcessFactory, NormalizerFactory normalizerFactory, - NamedXContentRegistry xContentRegistry, Auditor auditor) { + NamedXContentRegistry xContentRegistry, Auditor auditor, ClusterService clusterService) { this.environment = environment; this.client = client; this.threadPool = threadPool; @@ -153,6 +159,7 @@ public AutodetectProcessManager(Environment environment, Settings settings, Clie this.jobDataCountsPersister = 
jobDataCountsPersister; this.auditor = auditor; this.nativeStorageProvider = new NativeStorageProvider(environment, MIN_DISK_SPACE_OFF_HEAP.get(settings)); + clusterService.addListener(this); } public void onNodeStartup() { @@ -182,6 +189,7 @@ public void killProcess(JobTask jobTask, boolean awaitCompletion, String reason) .setAwaitCompletion(awaitCompletion) .setFinish(true) .setReason(reason) + .setShouldFinalizeJob(upgradeInProgress == false) .kill(); } else { // If the process is missing but the task exists this is most likely @@ -415,7 +423,7 @@ public void onFailure(Exception e) { } } - public void openJob(JobTask jobTask, ClusterState clusterState, Consumer closeHandler) { + public void openJob(JobTask jobTask, ClusterState clusterState, BiConsumer closeHandler) { String jobId = jobTask.getJobId(); logger.info("Opening job [{}]", jobId); @@ -426,7 +434,7 @@ public void openJob(JobTask jobTask, ClusterState clusterState, Consumer { if (job.getJobVersion() == null) { closeHandler.accept(ExceptionsHelper.badRequestException("Cannot open job [" + jobId - + "] because jobs created prior to version 5.5 are not supported")); + + "] because jobs created prior to version 5.5 are not supported"), true); return; } @@ -436,7 +444,7 @@ public void openJob(JobTask jobTask, ClusterState clusterState, Consumer closeHandler.accept(e1)); + setJobState(jobTask, JobState.FAILED, e2 -> closeHandler.accept(e1, true)); } } } }); }, e1 -> { logger.warn("Failed to gather information required to open job [" + jobId + "]", e1); - setJobState(jobTask, JobState.FAILED, e2 -> closeHandler.accept(e1)); + setJobState(jobTask, JobState.FAILED, e2 -> closeHandler.accept(e1, true)); }); }, - closeHandler + e -> closeHandler.accept(e, true) )); }, - closeHandler); + e -> closeHandler.accept(e, true)); // Make sure the state index and alias exist ActionListener resultsMappingUpdateHandler = ActionListener.wrap( ack -> AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, stateAliasHandler), - closeHandler + e -> closeHandler.accept(e, true) ); // Try adding the results doc mapping - this updates to the latest version if an old mapping is present @@ -492,7 +500,10 @@ protected void doRun() { ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } - private void createProcessAndSetRunning(ProcessContext processContext, Job job, AutodetectParams params, Consumer handler) { + private void createProcessAndSetRunning(ProcessContext processContext, + Job job, + AutodetectParams params, + BiConsumer handler) { // At this point we lock the process context until the process has been started. // The reason behind this is to ensure closing the job does not happen before // the process is started as that can result to the job getting seemingly closed @@ -509,7 +520,7 @@ private void createProcessAndSetRunning(ProcessContext processContext, Job job, } } - AutodetectCommunicator create(JobTask jobTask, Job job, AutodetectParams autodetectParams, Consumer handler) { + AutodetectCommunicator create(JobTask jobTask, Job job, AutodetectParams autodetectParams, BiConsumer handler) { // Closing jobs can still be using some or all threads in MachineLearning.AUTODETECT_THREAD_POOL_NAME // that an open job uses, so include them too when considering if enough threads are available. 
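        // (Illustrative numbers: with two jobs open and one job still closing, processByAllocation holds three
        // entries, so three jobs' worth of autodetect threads count as busy when sizing up room for another job.)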
int currentRunningJobs = processByAllocation.size(); @@ -771,6 +782,11 @@ public ByteSizeValue getMinLocalStorageAvailable() { return nativeStorageProvider.getMinLocalStorageAvailable(); } + @Override + public void clusterChanged(ClusterChangedEvent event) { + upgradeInProgress = MlMetadata.getMlMetadata(event.state()).isUpgradeMode(); + } + /* * The autodetect native process can only handle a single operation at a time. In order to guarantee that, all * operations are initially added to a queue and a worker thread from ml autodetect threadpool will process each diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java index 8771712dfde13..cf93848c6f060 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/ProcessContext.java @@ -86,6 +86,7 @@ class KillBuilder { private boolean awaitCompletion; private boolean finish; private boolean silent; + private boolean shouldFinalizeJob = true; private String reason; KillBuilder setAwaitCompletion(boolean awaitCompletion) { @@ -108,6 +109,11 @@ KillBuilder setReason(String reason) { return this; } + KillBuilder setShouldFinalizeJob(boolean shouldFinalizeJob) { + this.shouldFinalizeJob = shouldFinalizeJob; + return this; + } + void kill() { if (autodetectCommunicator == null) { return; @@ -123,7 +129,7 @@ void kill() { } } try { - autodetectCommunicator.killProcess(awaitCompletion, finish); + autodetectCommunicator.killProcess(awaitCompletion, finish, shouldFinalizeJob); } catch (IOException e) { LOGGER.error("[{}] Failed to kill autodetect process for job", jobId); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java new file mode 100644 index 0000000000000..af988a5a68029 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestSetUpgradeModeAction.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ml.rest; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction; +import org.elasticsearch.xpack.ml.MachineLearning; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestSetUpgradeModeAction extends BaseRestHandler { + + private static final DeprecationLogger deprecationLogger = + new DeprecationLogger(LogManager.getLogger(RestSetUpgradeModeAction.class)); + + public RestSetUpgradeModeAction(Settings settings, RestController controller) { + super(settings); + // TODO: remove deprecated endpoint in 8.0.0 + controller.registerWithDeprecatedHandler( + POST, MachineLearning.BASE_PATH + "set_upgrade_mode", this, + POST, MachineLearning.PRE_V7_BASE_PATH + "set_upgrade_mode", deprecationLogger); + } + + @Override + public String getName() { + return "ml_set_upgrade_mode_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + SetUpgradeModeAction.Request request = + new SetUpgradeModeAction.Request(restRequest.paramAsBoolean("enabled", false)); + request.timeout(restRequest.paramAsTime("timeout", request.timeout())); + request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); + return channel -> client.execute(SetUpgradeModeAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 09b43f5253cda..4b81cbb2dd6d3 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; @@ -52,6 +53,7 @@ public class DatafeedNodeSelectorTests extends ESTestCase { private DiscoveryNodes nodes; private ClusterState clusterState; private PersistentTasksCustomMetaData tasks; + private MlMetadata mlMetadata; @Before public void init() { @@ -60,6 +62,7 @@ public void init() { .add(new DiscoveryNode("node_name", "node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT)) .build(); + mlMetadata = new MlMetadata.Builder().build(); } public void testSelectNode_GivenJobIsOpened() { @@ -283,6 +286,39 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { + "[cannot start datafeed [datafeed_id] because index [not_foo] does not exist, is closed, or is still initializing.]")); } + public void testSelectNode_GivenMlUpgradeMode() { + Job 
job = createScheduledJob("job_id").build(new Date()); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); + tasks = tasksBuilder.build(); + mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build(); + + givenClusterState("foo", 1, 0); + + PersistentTasksCustomMetaData.Assignment result = + new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()).selectNode(); + assertThat(result, equalTo(MlTasks.AWAITING_UPGRADE)); + } + + public void testCheckDatafeedTaskCanBeCreated_GivenMlUpgradeMode() { + Job job = createScheduledJob("job_id").build(new Date()); + DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); + + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(); + addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); + tasks = tasksBuilder.build(); + mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build(); + + givenClusterState("foo", 1, 0); + + ElasticsearchException e = expectThrows(ElasticsearchException.class, + () -> new DatafeedNodeSelector(clusterState, resolver, df.getId(), df.getJobId(), df.getIndices()) + .checkDatafeedTaskCanBeCreated()); + assertThat(e.getMessage(), equalTo("Could not start datafeed [datafeed_id] as indices are being upgraded")); + } + private void givenClusterState(String index, int numberOfShards, int numberOfReplicas) { List> states = new ArrayList<>(1); states.add(new Tuple<>(0, ShardRoutingState.STARTED)); @@ -299,6 +335,7 @@ private void givenClusterState(String index, int numberOfShards, int numberOfRep clusterState = ClusterState.builder(new ClusterName("cluster_name")) .metaData(new MetaData.Builder() .putCustom(PersistentTasksCustomMetaData.TYPE, tasks) + .putCustom(MlMetadata.TYPE, mlMetadata) .put(indexMetaData, false)) .nodes(nodes) .routingTable(generateRoutingTable(indexMetaData, states)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java index fda96ca29a695..6705773dc7329 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java @@ -48,7 +48,7 @@ import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.elasticsearch.mock.orig.Mockito.doAnswer; import static org.hamcrest.Matchers.equalTo; @@ -193,7 +193,7 @@ public void testKill() throws IOException, TimeoutException { AtomicBoolean finishCalled = new AtomicBoolean(false); AutodetectCommunicator communicator = createAutodetectCommunicator(executorService, process, resultProcessor, - e -> finishCalled.set(true)); + (e, b) -> finishCalled.set(true)); boolean awaitCompletion = randomBoolean(); boolean finish = randomBoolean(); communicator.killProcess(awaitCompletion, finish); @@ -233,7 +233,7 @@ private AutodetectProcess mockAutodetectProcessWithOutputStream() throws IOExcep 
@SuppressWarnings("unchecked") private AutodetectCommunicator createAutodetectCommunicator(ExecutorService executorService, AutodetectProcess autodetectProcess, AutoDetectResultProcessor autoDetectResultProcessor, - Consumer finishHandler) throws IOException { + BiConsumer finishHandler) throws IOException { DataCountsReporter dataCountsReporter = mock(DataCountsReporter.class); doAnswer(invocation -> { ((ActionListener) invocation.getArguments()[0]).onResponse(true); @@ -259,7 +259,7 @@ private AutodetectCommunicator createAutodetectCommunicator(AutodetectProcess au return null; }).when(executorService).execute(any(Runnable.class)); - return createAutodetectCommunicator(executorService, autodetectProcess, autoDetectResultProcessor, e -> {}); + return createAutodetectCommunicator(executorService, autodetectProcess, autoDetectResultProcessor, (e, b) -> {}); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index ba319f1a90781..82788d4500b09 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -116,6 +117,7 @@ public class AutodetectProcessManagerTests extends ESTestCase { private NormalizerFactory normalizerFactory; private Auditor auditor; private ClusterState clusterState; + private ClusterService clusterService; private DataCounts dataCounts = new DataCounts("foo"); private ModelSizeStats modelSizeStats = new ModelSizeStats.Builder("foo").build(); @@ -135,6 +137,7 @@ public void setup() throws Exception { jobDataCountsPersister = mock(JobDataCountsPersister.class); normalizerFactory = mock(NormalizerFactory.class); auditor = mock(Auditor.class); + clusterService = mock(ClusterService.class); MetaData metaData = mock(MetaData.class); SortedMap aliasOrIndexSortedMap = new TreeMap<>(); aliasOrIndexSortedMap.put(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), mock(AliasOrIndex.Alias.class)); @@ -184,7 +187,7 @@ public void testOpenJob() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); when(jobTask.getAllocationId()).thenReturn(1L); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); assertEquals(1, manager.numberOfOpenJobs()); assertTrue(manager.jobHasActiveAutodetectProcess(jobTask)); verify(jobTask).updatePersistentTaskState(eq(new JobTaskState(JobState.OPENED, 1L)), any()); @@ -210,7 +213,7 @@ public void testOpenJob_withoutVersion() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn(job.getId()); AtomicReference errorHolder = new AtomicReference<>(); - manager.openJob(jobTask, clusterState, errorHolder::set); + manager.openJob(jobTask, clusterState, (e, b) -> errorHolder.set(e)); Exception error = errorHolder.get(); assertThat(error, is(notNullValue())); assertThat(error.getMessage(), 
equalTo("Cannot open job [no_version] because jobs created prior to version 5.5 are not supported")); @@ -245,7 +248,7 @@ public void testOpenJob_exceedMaxNumJobs() { settings.put(AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE.getKey(), 3); AutodetectProcessManager manager = spy(new AutodetectProcessManager(environment, settings.build(), client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, - normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor)); + normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService)); doReturn(executorService).when(manager).createAutodetectExecutorService(any()); doAnswer(invocationOnMock -> { @@ -256,22 +259,22 @@ public void testOpenJob_exceedMaxNumJobs() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("bar"); when(jobTask.getAllocationId()).thenReturn(1L); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("baz"); when(jobTask.getAllocationId()).thenReturn(2L); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); assertEquals(3, manager.numberOfOpenJobs()); Exception[] holder = new Exception[1]; jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foobar"); when(jobTask.getAllocationId()).thenReturn(3L); - manager.openJob(jobTask, clusterState, e -> holder[0] = e); + manager.openJob(jobTask, clusterState, (e, b) -> holder[0] = e); Exception e = holder[0]; assertEquals("max running job capacity [3] reached", e.getMessage()); @@ -280,7 +283,7 @@ public void testOpenJob_exceedMaxNumJobs() { when(jobTask.getJobId()).thenReturn("baz"); manager.closeJob(jobTask, false, null); assertEquals(2, manager.numberOfOpenJobs()); - manager.openJob(jobTask, clusterState, e1 -> {}); + manager.openJob(jobTask, clusterState, (e1, b) -> {}); assertEquals(3, manager.numberOfOpenJobs()); } @@ -292,7 +295,7 @@ public void testProcessData() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); DataLoadParams params = new DataLoadParams(TimeRange.builder().build(), Optional.empty()); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), params, (dataCounts1, e) -> {}); assertEquals(1, manager.numberOfOpenJobs()); @@ -315,7 +318,7 @@ public void testProcessDataThrowsElasticsearchStatusException_onIoException() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); Exception[] holder = new Exception[1]; manager.processData(jobTask, analysisRegistry, inputStream, xContentType, params, (dataCounts1, e) -> holder[0] = e); assertNotNull(holder[0]); @@ -328,7 +331,7 @@ public void testCloseJob() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, 
createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {}); @@ -356,7 +359,7 @@ public void testCanCloseClosingJob() throws Exception { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {}); @@ -398,13 +401,13 @@ public void testCanKillClosingJob() throws Exception { doAnswer(invocationOnMock -> { killLatch.countDown(); return null; - }).when(communicator).killProcess(anyBoolean(), anyBoolean()); + }).when(communicator).killProcess(anyBoolean(), anyBoolean(), anyBoolean()); AutodetectProcessManager manager = createManager(communicator); assertEquals(0, manager.numberOfOpenJobs()); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {}); @@ -433,7 +436,7 @@ public void testBucketResetMessageIsSent() { InputStream inputStream = createInputStream(""); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, inputStream, xContentType, params, (dataCounts1, e) -> {}); verify(communicator).writeToJob(same(inputStream), same(analysisRegistry), same(xContentType), same(params), any()); } @@ -445,7 +448,7 @@ public void testFlush() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); InputStream inputStream = createInputStream(""); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, inputStream, randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {}); @@ -485,7 +488,7 @@ public void testCloseThrows() { // create a jobtask JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> { }); @@ -525,7 +528,7 @@ public void testJobHasActiveAutodetectProcess() { when(jobTask.getJobId()).thenReturn("foo"); assertFalse(manager.jobHasActiveAutodetectProcess(jobTask)); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> {}); @@ -543,7 +546,7 @@ public void testKillKillsAutodetectProcess() throws IOException { when(jobTask.getJobId()).thenReturn("foo"); assertFalse(manager.jobHasActiveAutodetectProcess(jobTask)); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts1, e) -> 
{}); @@ -551,7 +554,7 @@ public void testKillKillsAutodetectProcess() throws IOException { manager.killAllProcessesOnThisNode(); - verify(communicator).killProcess(false, false); + verify(communicator).killProcess(false, false, true); } public void testKillingAMissingJobFinishesTheTask() throws IOException { @@ -577,7 +580,7 @@ public void testProcessData_GivenStateNotOpened() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); InputStream inputStream = createInputStream(""); DataCounts[] dataCounts = new DataCounts[1]; manager.processData(jobTask, analysisRegistry, inputStream, @@ -607,12 +610,12 @@ public void testCreate_notEnoughThreads() throws IOException { (j, autodetectParams, e, onProcessCrash) -> autodetectProcess; AutodetectProcessManager manager = new AutodetectProcessManager(environment, Settings.EMPTY, client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, - normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor); + normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("my_id"); expectThrows(EsRejectedExecutionException.class, - () -> manager.create(jobTask, job, buildAutodetectParams(), e -> {})); + () -> manager.create(jobTask, job, buildAutodetectParams(), (e, b) -> {})); verify(autodetectProcess, times(1)).close(); } @@ -622,7 +625,7 @@ public void testCreate_givenFirstTime() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), (e, b) -> {}); String expectedNotification = "Loading model snapshot [N/A], job latest_record_timestamp [N/A]"; verify(auditor).info("foo", expectedNotification); @@ -638,7 +641,7 @@ public void testCreate_givenExistingModelSnapshot() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), (e, b) -> {}); String expectedNotification = "Loading model snapshot [snapshot-1] with " + "latest_record_timestamp [1970-01-01T00:00:00.000Z], " + @@ -657,7 +660,7 @@ public void testCreate_givenNonZeroCountsAndNoModelSnapshotNorQuantiles() { JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn("foo"); - manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), e -> {}); + manager.create(jobTask, createJobDetails("foo"), buildAutodetectParams(), (e, b) -> {}); String expectedNotification = "Loading model snapshot [N/A], " + "job latest_record_timestamp [1970-01-01T00:00:00.000Z]"; @@ -706,7 +709,7 @@ private AutodetectProcessManager createNonSpyManager(String jobId) { (j, autodetectParams, e, onProcessCrash) -> autodetectProcess; return new AutodetectProcessManager(environment, Settings.EMPTY, client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, - normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor); + normalizerFactory, new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService); } private 
AutodetectParams buildAutodetectParams() { @@ -732,7 +735,7 @@ private AutodetectProcessManager createManager(AutodetectCommunicator communicat AutodetectProcessManager manager = new AutodetectProcessManager(environment, Settings.EMPTY, client, threadPool, jobManager, jobResultsProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory, normalizerFactory, - new NamedXContentRegistry(Collections.emptyList()), auditor); + new NamedXContentRegistry(Collections.emptyList()), auditor, clusterService); manager = spy(manager); doReturn(communicator).when(manager).create(any(), any(), eq(buildAutodetectParams()), any()); return manager; @@ -742,7 +745,7 @@ private AutodetectProcessManager createManagerAndCallProcessData(AutodetectCommu AutodetectProcessManager manager = createManager(communicator); JobTask jobTask = mock(JobTask.class); when(jobTask.getJobId()).thenReturn(jobId); - manager.openJob(jobTask, clusterState, e -> {}); + manager.openJob(jobTask, clusterState, (e, b) -> {}); manager.processData(jobTask, analysisRegistry, createInputStream(""), randomFrom(XContentType.values()), mock(DataLoadParams.class), (dataCounts, e) -> {}); return manager; diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json new file mode 100644 index 0000000000000..bb3220ece6b13 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ml.set_upgrade_mode.json @@ -0,0 +1,21 @@ +{ + "ml.set_upgrade_mode": { + "documentation": "TODO", + "methods": [ "POST" ], + "url": { + "path": "/_ml/set_upgrade_mode", + "paths": [ "/_ml/set_upgrade_mode" ], + "params": { + "enabled": { + "type": "boolean", + "description": "Whether to enable upgrade_mode ML setting or not. Defaults to false." + }, + "timeout": { + "type": "time", + "description": "Controls the time to wait before action times out. 
Defaults to 30 seconds" + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml index 0013661e6d436..16ac0973222d5 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/ml_info.yml @@ -15,6 +15,7 @@ teardown: - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } - match: { defaults.datafeeds.scroll_size: 1000 } - match: { limits: {} } + - match: { upgrade_mode: false } - do: cluster.put_settings: @@ -29,6 +30,7 @@ teardown: - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } - match: { defaults.datafeeds.scroll_size: 1000 } - match: { limits.max_model_memory_limit: "512mb" } + - match: { upgrade_mode: false } - do: cluster.put_settings: @@ -43,3 +45,4 @@ teardown: - match: { defaults.anomaly_detectors.model_snapshot_retention_days: 1 } - match: { defaults.datafeeds.scroll_size: 1000 } - match: { limits.max_model_memory_limit: "6gb" } + - match: { upgrade_mode: false } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml new file mode 100644 index 0000000000000..95ce67299c76a --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/set_upgrade_mode.yml @@ -0,0 +1,212 @@ +--- +setup: + - skip: + features: headers + - do: + indices.create: + index: airline-data + body: + mappings: + response: + properties: + time: + type: date + airline: + type: keyword + airport: + type: text + responsetime: + type: float + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_job: + job_id: set-upgrade-mode-job + body: > + { + "job_id":"set-upgrade-mode-job", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.put_datafeed: + datafeed_id: set-upgrade-mode-job-datafeed + body: > + { + "job_id":"set-upgrade-mode-job", + "indexes":["airline-data"] + } + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.open_job: + job_id: set-upgrade-mode-job + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + ml.start_datafeed: + datafeed_id: set-upgrade-mode-job-datafeed + +--- +teardown: + - skip: + features: headers + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. 
the test setup superuser + ml.set_upgrade_mode: + enabled: false + +--- +"Test setting upgrade_mode to false when it is already false": + - do: + ml.set_upgrade_mode: + enabled: false + - match: { acknowledged: true } + + - do: + cat.tasks: {} + - match: + $body: | + /.+job.+/ + + - do: + cat.tasks: {} + - match: + $body: | + /.+datafeed.+/ + - do: + ml.info: {} + - match: { upgrade_mode: false } + +--- +"Setting upgrade_mode to enabled": + - do: + ml.info: {} + - match: { upgrade_mode: false } + + - do: + ml.set_upgrade_mode: + enabled: true + - match: { acknowledged: true } + + - do: + ml.get_job_stats: + job_id: set-upgrade-mode-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.assignment_explanation: "persistent task cannot be assigned while upgrade mode is enabled." } + + - do: + ml.get_datafeed_stats: + datafeed_id: set-upgrade-mode-job-datafeed + - match: { datafeeds.0.state: "started" } + - match: { datafeeds.0.assignment_explanation: "persistent task cannot be assigned while upgrade mode is enabled." } + + - do: + tasks.list: + actions: "xpack/ml/job*,xpack/ml/datafeed*" + + - match: { nodes: { } } + + - do: + ml.info: {} + - match: { upgrade_mode: true } + +--- +"Setting upgrade mode to disabled from enabled": + - do: + ml.set_upgrade_mode: + enabled: true + - match: { acknowledged: true } + + - do: + ml.get_job_stats: + job_id: set-upgrade-mode-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.assignment_explanation: "persistent task cannot be assigned while upgrade mode is enabled." } + + - do: + ml.get_datafeed_stats: + datafeed_id: set-upgrade-mode-job-datafeed + - match: { datafeeds.0.state: "started" } + - match: { datafeeds.0.assignment_explanation: "persistent task cannot be assigned while upgrade mode is enabled." 
} + + - do: + tasks.list: + actions: "xpack/ml/job*,xpack/ml/datafeed*" + + - match: { nodes: { } } + + - do: + ml.info: {} + - match: { upgrade_mode: true } + + - do: + ml.set_upgrade_mode: + enabled: false + - match: { acknowledged: true } + + - do: + ml.get_job_stats: + job_id: set-upgrade-mode-job + - match: { jobs.0.state: "opened" } + - match: { jobs.0.assignment_explanation: "" } + + - do: + ml.get_datafeed_stats: + datafeed_id: set-upgrade-mode-job-datafeed + - match: { datafeeds.0.state: "started" } + - match: { datafeeds.0.assignment_explanation: "" } + + - do: + cat.tasks: {} + - match: + $body: | + /.+job.+/ + + - do: + cat.tasks: {} + - match: + $body: | + /.+datafeed.+/ + +--- +"Attempt to open job when upgrade_mode is enabled": + - do: + ml.set_upgrade_mode: + enabled: true + - match: { acknowledged: true } + + - do: + ml.put_job: + job_id: failing-set-upgrade-mode-job + body: > + { + "job_id":"failing-set-upgrade-mode-job", + "analysis_config" : { + "bucket_span": "1h", + "detectors" :[{"function":"count"}] + }, + "data_description" : { + "format":"xcontent", + "time_field":"time", + "time_format":"epoch" + } + } + + - do: + catch: /Cannot open jobs when upgrade mode is enabled/ + ml.open_job: + job_id: failing-set-upgrade-mode-job From 0a850f032bb3093663ab39737c4c9d0748ff99d2 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 28 Jan 2019 16:36:39 +0100 Subject: [PATCH 34/57] Handle deprecation warnings in a permissive manner Relates to #37290 --- .../xpack/restart/MlMigrationFullClusterRestartIT.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java index 0e5ee72546487..916ad9c5cb57c 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/MlMigrationFullClusterRestartIT.java @@ -7,7 +7,9 @@ import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -70,6 +72,9 @@ private void createTestIndex() throws IOException { "\"airline\": {\"type\": \"keyword\"}," + "\"responsetime\": {\"type\": \"float\"}" + "}}}}"); + RequestOptions.Builder options = createTestIndex.getOptions().toBuilder(); + options.setWarningsHandler(WarningsHandler.PERMISSIVE); + createTestIndex.setOptions(options); client().performRequest(createTestIndex); } From 9ceb218d8597ed7aa565ce2b0e1d8f10d137c96a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Sun, 27 Jan 2019 21:50:46 -0500 Subject: [PATCH 35/57] Adjust bwc version for put mapping requests Relates #37675 --- .../action/admin/indices/mapping/put/PutMappingRequest.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 9b903a81e0327..ce1efb998d5c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -312,7 +312,7 @@ public void readFrom(StreamInput in) throws IOException { in.readBoolean(); // updateAllTypes } concreteIndex = in.readOptionalWriteable(Index::new); - if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + if (in.getVersion().onOrAfter(Version.V_6_7_0)) { origin = in.readOptionalString(); } else { origin = null; @@ -330,7 +330,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); // updateAllTypes } out.writeOptionalWriteable(concreteIndex); - if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + if (out.getVersion().onOrAfter(Version.V_6_7_0)) { out.writeOptionalString(origin); } } From 99b75a9bdf68f2629f7a4da5213511f1ca450655 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 28 Jan 2019 13:46:43 -0600 Subject: [PATCH 36/57] deprecate types for watcher (#37594) This commit adds deprecation warnings for index actions and search actions when executed via watcher. Unit and integration tests updated accordingly. relates #35190 --- .../test/watcher/ack_watch/10_basic.yml | 3 +-- .../ack_watch/20_ack_individual_action.yml | 3 +-- .../30_reset_ack_after_unmet_condition.yml | 1 - ...reset_ack_after_unmet_action_condition.yml | 1 - .../test/watcher/activate_watch/10_basic.yml | 3 +-- .../test/watcher/delete_watch/10_basic.yml | 3 +-- .../test/watcher/execute_watch/10_basic.yml | 3 --- .../watcher/execute_watch/20_transform.yml | 7 ------- .../test/watcher/execute_watch/70_invalid.yml | 1 - .../test/watcher/get_watch/10_basic.yml | 3 +-- .../test/watcher/get_watch/20_missing.yml | 3 +-- .../test/watcher/put_watch/10_basic.yml | 3 +-- .../20_put_watch_with_throttle_period.yml | 3 +-- ..._put_watch_with_action_throttle_period.yml | 3 +-- .../put_watch/40_put_watch_as_inactive.yml | 3 +-- .../60_put_watch_with_action_condition.yml | 3 +-- ...0_put_watch_with_index_action_using_id.yml | 10 +++------ .../put_watch/91_search_total_hits_as_int.yml | 3 --- .../watcher/actions/index/IndexAction.java | 6 ++++++ .../search/WatcherSearchTemplateRequest.java | 10 ++++++++- .../search/WatcherSearchTemplateService.java | 4 +++- .../actions/index/IndexActionTests.java | 15 +++++++++++-- .../watcher/support/WatcherUtilsTests.java | 18 ++++++++++++---- .../WatcherSearchTemplateRequestTests.java | 18 ++++++++++++++++ .../xpack/watcher/test/WatcherTestUtils.java | 2 +- .../xpack/watcher/watch/WatchTests.java | 2 +- .../xpack/restart/FullClusterRestartIT.java | 21 ++++++++++++------- .../test/10_templated_role_query.yml | 2 -- .../test/11_templated_role_query_runas.yml | 2 -- .../test/20_small_users_one_index.yml | 4 ---- .../rest-api-spec/test/30_search_template.yml | 2 -- .../20_test_run_as_execute_watch.yml | 9 -------- .../60_chain_input_with_transform.yml | 2 -- 33 files changed, 93 insertions(+), 83 deletions(-) diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml index 385f2e9da6503..9c861e3dcd831 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/10_basic.yml @@ -25,8 +25,7 @@ "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml 
b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml index b59fd561a7594..813e1f0c88899 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/20_ack_individual_action.yml @@ -25,8 +25,7 @@ "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml index 2f3a815346484..2a9a4959de4c2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/30_reset_ack_after_unmet_condition.yml @@ -38,7 +38,6 @@ teardown: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "doc_id": "my-id" } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml index 93639163ac04b..946f23a2f5a4e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/ack_watch/40_reset_ack_after_unmet_action_condition.yml @@ -38,7 +38,6 @@ teardown: }, "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "doc_id": "my-id" } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml index 221887494c5eb..99459119e3cdf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/activate_watch/10_basic.yml @@ -25,8 +25,7 @@ "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml index 44d4e187e3fb1..d22b66f85d188 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/delete_watch/10_basic.yml @@ -38,8 +38,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml index b3ad1e6d545ff..1fd3c06b2eee7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/10_basic.yml @@ -33,7 +33,6 @@ teardown: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "doc_id": "my-id" } } @@ -87,7 +86,6 @@ teardown: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "doc_id": "my-id" } } @@ -130,7 +128,6 @@ 
teardown: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "refresh" : "wait_for", "doc_id": "my-id" } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml index 59ebacbfe902d..3766cb6c4a788 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/20_transform.yml @@ -9,7 +9,6 @@ setup: - do: index: index: my_test_index - type: doc id: my_id refresh: true body: > @@ -49,7 +48,6 @@ setup: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "doc", "doc_id": "my-id" } } @@ -64,7 +62,6 @@ setup: - do: get: index: my_test_index - type: doc id: my_id - match: { _source.key: "value" } @@ -82,7 +79,6 @@ setup: - do: index: index: my_test_index - type: doc id: my_id refresh: true body: > @@ -123,7 +119,6 @@ setup: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "doc", "doc_id": "my-id" } } @@ -138,7 +133,6 @@ setup: - do: get: index: my_test_index - type: doc id: my_id - match: { _source.key: "value" } @@ -183,7 +177,6 @@ setup: "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "doc", "doc_id": "my-id" } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml index fc8687eb699b1..f13c5faf59959 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/execute_watch/70_invalid.yml @@ -38,7 +38,6 @@ "indexme" : { "index" : { "index" : "my_test_index", - "doc_type" : "my-type", "doc_id": "my-id" } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml index 180b72f2ba471..3ae5492328702 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/10_basic.yml @@ -38,8 +38,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml index ee4fd2e7e43a8..fc795005ac8a8 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/get_watch/20_missing.yml @@ -30,8 +30,7 @@ "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml index b2ea5b8042f69..78d1b6e65e666 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/10_basic.yml @@ -29,8 +29,7 @@ "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git 
a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml index 14bd682bd02d6..ab8d852dab3d4 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/20_put_watch_with_throttle_period.yml @@ -39,8 +39,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml index db4013bc38a25..a48d667066ef3 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/30_put_watch_with_action_throttle_period.yml @@ -39,8 +39,7 @@ teardown: "test_index": { "throttle_period" : "10s", "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml index 23075ecfa7940..47b27d6b9be3e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/40_put_watch_as_inactive.yml @@ -39,8 +39,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml index 670a64381d041..bc26a60e4702f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/60_put_watch_with_action_condition.yml @@ -43,8 +43,7 @@ teardown: } }, "index": { - "index": "test", - "doc_type": "test2" + "index": "test" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml index 50420c1e4eeda..7bad6c8f1eebf 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/70_put_watch_with_index_action_using_id.yml @@ -37,7 +37,6 @@ teardown: "test_index": { "index": { "index": "my_test_index", - "doc_type": "test2", "doc_id": "test_id1" } } @@ -86,8 +85,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "my_test_index", - "doc_type": "test2" + "index": "my_test_index" } } } @@ -143,8 +141,7 @@ teardown: "actions": { "test_index": { "index": { - "index": "my_test_index", - "doc_type": "test2" + "index": "my_test_index" } } } @@ -202,8 +199,7 @@ teardown: "actions": { "test_index": { "index": { - "index": 
"my_test_index", - "doc_type": "test2" + "index": "my_test_index" } } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml index 46986438ee4a4..eba7f75a75968 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/put_watch/91_search_total_hits_as_int.yml @@ -7,7 +7,6 @@ setup: - do: index: index: my_test_index - type: doc id: my_id refresh: true body: > @@ -97,7 +96,6 @@ setup: }, "index" : { "index" : "my_test_index", - "doc_type" : "doc", "doc_id": "my-id" } } @@ -108,7 +106,6 @@ setup: - do: get: index: my_test_index - type: doc id: my_id - match: { _source.key: "value" } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java index ceb6ac88f17f6..1b9bc373aae47 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/IndexAction.java @@ -5,10 +5,12 @@ */ package org.elasticsearch.xpack.watcher.actions.index; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,6 +36,9 @@ public class IndexAction implements Action { @Nullable final DateTimeZone dynamicNameTimeZone; @Nullable final RefreshPolicy refreshPolicy; + private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(IndexAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in a watcher index action is deprecated."; + public IndexAction(@Nullable String index, @Nullable String docType, @Nullable String docId, @Nullable String executionTimeField, @Nullable TimeValue timeout, @Nullable DateTimeZone dynamicNameTimeZone, @Nullable RefreshPolicy refreshPolicy) { @@ -151,6 +156,7 @@ public static IndexAction parse(String watchId, String actionId, XContentParser } } else if (token == XContentParser.Token.VALUE_STRING) { if (Field.DOC_TYPE.match(currentFieldName, parser.getDeprecationHandler())) { + deprecationLogger.deprecatedAndMaybeLog("watcher_index_action", TYPES_DEPRECATION_MESSAGE); docType = parser.text(); } else if (Field.DOC_ID.match(currentFieldName, parser.getDeprecationHandler())) { docId = parser.text(); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java index 245093f3b385c..670f4ffe66852 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequest.java 
@@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.support.search; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; @@ -13,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -43,6 +45,10 @@ public class WatcherSearchTemplateRequest implements ToXContentObject { private final BytesReference searchSource; private boolean restTotalHitsAsInt = true; + private static final DeprecationLogger deprecationLogger = + new DeprecationLogger(LogManager.getLogger(WatcherSearchTemplateRequest.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in a watcher search request is deprecated."; + public WatcherSearchTemplateRequest(String[] indices, String[] types, SearchType searchType, IndicesOptions indicesOptions, BytesReference searchSource) { this.indices = indices; @@ -203,6 +209,7 @@ public static WatcherSearchTemplateRequest fromXContent(XContentParser parser, S } } } else if (TYPES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + deprecationLogger.deprecatedAndMaybeLog("watcher_search_input", TYPES_DEPRECATION_MESSAGE); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token == XContentParser.Token.VALUE_STRING) { types.add(parser.textOrNull()); @@ -278,6 +285,7 @@ public static WatcherSearchTemplateRequest fromXContent(XContentParser parser, S String indicesStr = parser.text(); indices.addAll(Arrays.asList(Strings.delimitedListToStringArray(indicesStr, ",", " \t"))); } else if (TYPES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + deprecationLogger.deprecatedAndMaybeLog("watcher_search_input", TYPES_DEPRECATION_MESSAGE); String typesStr = parser.text(); types.addAll(Arrays.asList(Strings.delimitedListToStringArray(typesStr, ",", " \t"))); } else if (SEARCH_TYPE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -305,7 +313,7 @@ public static WatcherSearchTemplateRequest fromXContent(XContentParser parser, S } WatcherSearchTemplateRequest request = new WatcherSearchTemplateRequest(indices.toArray(new String[0]), - types.toArray(new String[0]), searchType, indicesOptions, searchSource, template); + types.size() == 0 ? 
null : types.toArray(new String[0]), searchType, indicesOptions, searchSource, template); request.setRestTotalHitsAsInt(totalHitsAsInt); return request; } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java index 4cc3b0cb709aa..d86a63948c7ac 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateService.java @@ -57,7 +57,9 @@ public String renderTemplate(Script source, WatchExecutionContext ctx, Payload p public SearchRequest toSearchRequest(WatcherSearchTemplateRequest request) throws IOException { SearchRequest searchRequest = new SearchRequest(request.getIndices()); - searchRequest.types(request.getTypes()); + if (request.getTypes() != null) { + searchRequest.types(request.getTypes()); + } searchRequest.searchType(request.getSearchType()); searchRequest.indicesOptions(request.getIndicesOptions()); SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java index 346b2d0e85cdf..85f17fbd0e8d1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -78,7 +78,6 @@ public void testParser() throws Exception { if (includeIndex) { builder.field(IndexAction.Field.INDEX.getPreferredName(), "test-index"); } - builder.field(IndexAction.Field.DOC_TYPE.getPreferredName(), "test-type"); if (timestampField != null) { builder.field(IndexAction.Field.EXECUTION_TIME_FIELD.getPreferredName(), timestampField); } @@ -93,7 +92,6 @@ public void testParser() throws Exception { ExecutableIndexAction executable = actionParser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(3), parser); - assertThat(executable.action().docType, equalTo("test-type")); if (includeIndex) { assertThat(executable.action().index, equalTo("test-index")); } @@ -103,6 +101,19 @@ public void testParser() throws Exception { assertThat(executable.action().timeout, equalTo(writeTimeout)); } + public void testDeprecationTypes() throws Exception { + XContentBuilder builder = jsonBuilder(); + builder.startObject(); + builder.field(IndexAction.Field.DOC_TYPE.getPreferredName(), "test-type"); + builder.endObject(); + IndexActionFactory actionParser = new IndexActionFactory(Settings.EMPTY, client); + XContentParser parser = createParser(builder); + parser.nextToken(); + ExecutableIndexAction executable = actionParser.parseExecutable(randomAlphaOfLength(5), randomAlphaOfLength(3), parser); + assertThat(executable.action().docType, equalTo("test-type")); + assertWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE); + } + public void testParserFailure() throws Exception { // wrong type for field expectParseFailure(jsonBuilder() diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java index 
401d101a469d3..7aab1d4d14504 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherUtilsTests.java @@ -90,7 +90,7 @@ public void testResponseToData() throws Exception { public void testSerializeSearchRequest() throws Exception { String[] expectedIndices = generateRandomStringArray(5, 5, true); - String[] expectedTypes = generateRandomStringArray(2, 5, true); + String[] expectedTypes = generateRandomStringArray(2, 5, true, false); IndicesOptions expectedIndicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS); SearchType expectedSearchType = getRandomSupportedSearchType(); @@ -129,7 +129,6 @@ public void testSerializeSearchRequest() throws Exception { WatcherSearchTemplateRequest result = WatcherSearchTemplateRequest.fromXContent(parser, DEFAULT_SEARCH_TYPE); assertThat(result.getIndices(), arrayContainingInAnyOrder(expectedIndices != null ? expectedIndices : new String[0])); - assertThat(result.getTypes(), arrayContainingInAnyOrder(expectedTypes != null ? expectedTypes : new String[0])); assertThat(result.getIndicesOptions(), equalTo(expectedIndicesOptions)); assertThat(result.getSearchType(), equalTo(expectedSearchType)); @@ -143,6 +142,12 @@ public void testSerializeSearchRequest() throws Exception { assertThat(result.getTemplate().getIdOrCode(), equalTo(expectedSource.utf8ToString())); assertThat(result.getTemplate().getType(), equalTo(ScriptType.INLINE)); } + if (expectedTypes == null) { + assertNull(result.getTypes()); + } else { + assertThat(result.getTypes(), arrayContainingInAnyOrder(expectedTypes)); + assertWarnings(WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE); + } } public void testDeserializeSearchRequest() throws Exception { @@ -161,7 +166,7 @@ public void testDeserializeSearchRequest() throws Exception { String[] types = Strings.EMPTY_ARRAY; if (randomBoolean()) { - types = generateRandomStringArray(2, 5, false); + types = generateRandomStringArray(2, 5, false, false); if (randomBoolean()) { builder.array("types", types); } else { @@ -220,7 +225,6 @@ public void testDeserializeSearchRequest() throws Exception { WatcherSearchTemplateRequest result = WatcherSearchTemplateRequest.fromXContent(parser, DEFAULT_SEARCH_TYPE); assertThat(result.getIndices(), arrayContainingInAnyOrder(indices)); - assertThat(result.getTypes(), arrayContainingInAnyOrder(types)); assertThat(result.getIndicesOptions(), equalTo(indicesOptions)); assertThat(result.getSearchType(), equalTo(searchType)); if (source == null) { @@ -236,6 +240,12 @@ public void testDeserializeSearchRequest() throws Exception { assertThat(result.getTemplate().getParams(), equalTo(template.getParams())); assertThat(result.getTemplate().getLang(), equalTo(stored ? 
null : "mustache")); } + if (types == Strings.EMPTY_ARRAY) { + assertNull(result.getTypes()); + } else { + assertThat(result.getTypes(), arrayContainingInAnyOrder(types)); + assertWarnings(WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE); + } } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java index 07fd3c7765485..c535e1036824a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/search/WatcherSearchTemplateRequestTests.java @@ -39,6 +39,24 @@ public void testDefaultHitCountsConfigured() throws IOException { assertHitCount(source, hitCountsAsInt); } + public void testDeprecationForSingleType() throws IOException { + String source = "{\"types\":\"mytype\"}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { + parser.nextToken(); + WatcherSearchTemplateRequest.fromXContent(parser, SearchType.QUERY_THEN_FETCH); + } + assertWarnings(WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE); + } + + public void testDeprecationForMultiType() throws IOException { + String source = "{\"types\":[\"mytype1\",\"mytype2\"]}"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { + parser.nextToken(); + WatcherSearchTemplateRequest.fromXContent(parser, SearchType.QUERY_THEN_FETCH); + } + assertWarnings(WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE); + } + private void assertHitCount(String source, boolean expectedHitCountAsInt) throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { parser.nextToken(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java index bb5a6eabdd5b9..e22d3d6f0837c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/WatcherTestUtils.java @@ -89,7 +89,7 @@ public static WatcherSearchTemplateRequest templateRequest(SearchSourceBuilder s try { XContentBuilder xContentBuilder = jsonBuilder(); xContentBuilder.value(sourceBuilder); - return new WatcherSearchTemplateRequest(indices, new String[0], searchType, + return new WatcherSearchTemplateRequest(indices, null, searchType, WatcherSearchTemplateRequest.DEFAULT_INDICES_OPTIONS, BytesReference.bytes(xContentBuilder)); } catch (IOException e) { throw new RuntimeException(e); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java index daf1f18f3bd2f..cb32c3eebec09 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchTests.java @@ -586,7 +586,7 @@ private List randomActions() { DateTimeZone timeZone = randomBoolean() ? DateTimeZone.UTC : null; TimeValue timeout = randomBoolean() ? timeValueSeconds(between(1, 10000)) : null; WriteRequest.RefreshPolicy refreshPolicy = randomBoolean() ? 
null : randomFrom(WriteRequest.RefreshPolicy.values()); - IndexAction action = new IndexAction("_index", "_type", randomBoolean() ? "123" : null, null, timeout, timeZone, + IndexAction action = new IndexAction("_index", null, randomBoolean() ? "123" : null, null, timeout, timeZone, refreshPolicy); list.add(new ActionWrapper("_index_" + randomAlphaOfLength(8), randomThrottler(), AlwaysConditionTests.randomCondition(scriptService), randomTransform(), diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index e52a6dd3b4303..d1aefb4000890 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -24,9 +24,11 @@ import org.elasticsearch.upgrades.AbstractFullClusterRestartTestCase; import org.elasticsearch.xpack.core.watcher.client.WatchSourceBuilder; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import org.elasticsearch.xpack.watcher.actions.index.IndexAction; import org.elasticsearch.xpack.watcher.actions.logging.LoggingAction; import org.elasticsearch.xpack.watcher.common.text.TextTemplate; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; +import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.hamcrest.Matcher; @@ -193,7 +195,7 @@ public void testWatcher() throws Exception { logger.info("checking that upgrade procedure on the new cluster is no longer required"); Map responseAfter = entityAsMap(client().performRequest( - new Request("GET", "/_migration/assistance"))); + new Request("GET", "/_migration/assistance"))); @SuppressWarnings("unchecked") Map indicesAfter = (Map) responseAfter.get("indices"); assertNull(indicesAfter.get(".watches")); } else { @@ -207,7 +209,7 @@ public void testWatcher() throws Exception { Map statsWatchResponse = entityAsMap(client().performRequest(new Request("GET", "_watcher/stats"))); @SuppressWarnings("unchecked") List states = ((List) statsWatchResponse.get("stats")) - .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); + .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); assertThat(states, everyItem(is("started"))); }); @@ -223,10 +225,10 @@ public void testWatcher() throws Exception { assertThat(stopWatchResponse.get("acknowledged"), equalTo(Boolean.TRUE)); assertBusy(() -> { Map statsStoppedWatchResponse = entityAsMap(client().performRequest( - new Request("GET", "_watcher/stats"))); + new Request("GET", "_watcher/stats"))); @SuppressWarnings("unchecked") List states = ((List) statsStoppedWatchResponse.get("stats")) - .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); + .stream().map(o -> ((Map) o).get("watcher_state")).collect(Collectors.toList()); assertThat(states, everyItem(is("stopped"))); }); } @@ -456,7 +458,10 @@ private void assertOldTemplatesAreDeleted() throws IOException { @SuppressWarnings("unchecked") private void assertWatchIndexContentsWork() throws Exception { // Fetch a basic watch - Map bwcWatch = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/bwc_watch"))); + 
Request getRequest = new Request("GET", "_watcher/watch/bwc_watch"); + getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, + WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + Map bwcWatch = entityAsMap(client().performRequest(getRequest)); logger.error("-----> {}", bwcWatch); @@ -467,11 +472,13 @@ private void assertWatchIndexContentsWork() throws Exception { assertThat(ObjectPath.eval("input.search.timeout_in_millis", source), equalTo(timeout)); assertThat(ObjectPath.eval("actions.index_payload.transform.search.timeout_in_millis", source), equalTo(timeout)); assertThat(ObjectPath.eval("actions.index_payload.index.index", source), equalTo("bwc_watch_index")); - assertThat(ObjectPath.eval("actions.index_payload.index.doc_type", source), equalTo("bwc_watch_type")); assertThat(ObjectPath.eval("actions.index_payload.index.timeout_in_millis", source), equalTo(timeout)); // Fetch a watch with "fun" throttle periods - bwcWatch = entityAsMap(client().performRequest(new Request("GET", "_watcher/watch/bwc_throttle_period"))); + getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period"); + getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, + WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + bwcWatch = entityAsMap(client().performRequest(getRequest)); assertThat(bwcWatch.get("found"), equalTo(true)); source = (Map) bwcWatch.get("watch"); assertEquals(timeout, source.get("throttle_period_in_millis")); diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml index f1f6ab886375d..84d8d98e27384 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/10_templated_role_query.yml @@ -110,7 +110,6 @@ setup: - do: index: index: foobar - type: type id: 1 body: > { @@ -119,7 +118,6 @@ setup: - do: index: index: foobar - type: type id: 2 body: > { diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml index 0011fa7b4446b..2f4755943aa2d 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/11_templated_role_query_runas.yml @@ -110,7 +110,6 @@ setup: - do: index: index: foobar - type: type id: 1 body: > { @@ -119,7 +118,6 @@ setup: - do: index: index: foobar - type: type id: 2 body: > { diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml index a8fa79a04fc27..4c4e673cd29ef 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/20_small_users_one_index.yml @@ -88,7 +88,6 @@ teardown: Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" index: index: shared_logs - type: type pipeline: "my_pipeline" body: > { @@ -99,7 +98,6 @@ 
teardown: Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" index: index: shared_logs - type: type pipeline: "my_pipeline" body: > { @@ -158,7 +156,6 @@ teardown: Authorization: "Basic am9lOngtcGFjay10ZXN0LXBhc3N3b3Jk" index: index: shared_logs - type: type pipeline: "my_pipeline" body: > { @@ -169,7 +166,6 @@ teardown: Authorization: "Basic am9objp4LXBhY2stdGVzdC1wYXNzd29yZA==" index: index: shared_logs - type: type pipeline: "my_pipeline" body: > { diff --git a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml index 913979a4bb5e5..a208bda67cfe2 100644 --- a/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml +++ b/x-pack/qa/smoke-test-security-with-mustache/src/test/resources/rest-api-spec/test/30_search_template.yml @@ -32,7 +32,6 @@ setup: - do: index: index: foobar - type: type id: 1 body: title: "contains some words" @@ -40,7 +39,6 @@ setup: - do: index: index: unauthorized_index - type: type id: 2 body: title: "contains some words too" diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml index bd0d32ef9cc5f..ec0be2532a6ee 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/resources/rest-api-spec/test/watcher/watcher_and_security/20_test_run_as_execute_watch.yml @@ -8,7 +8,6 @@ setup: - do: index: index: my_test_index - type: type id: 1 refresh: true body: > @@ -220,7 +219,6 @@ teardown: "index": { "index": { "index" : "my_test_index", - "doc_type" : "type", "doc_id": "my-id" } } @@ -236,7 +234,6 @@ teardown: - do: get: index: my_test_index - type: type id: my-id # this value is from the document in the my_text_index index, see the setup - match: { _source.hits.hits.0._source.value: "15" } @@ -272,7 +269,6 @@ teardown: "index": { "index": { "index" : "my_test_index", - "doc_type" : "type", "doc_id": "my-id" } } @@ -288,7 +284,6 @@ teardown: - do: get: index: my_test_index - type: type id: my-id - match: { _source.hits.total: 0 } @@ -315,7 +310,6 @@ teardown: "index": { "index": { "index" : "my_test_index", - "doc_type" : "type", "doc_id": "my-id" } } @@ -339,7 +333,6 @@ teardown: - do: get: index: my_test_index - type: type id: 1 - match: { _id: "1" } @@ -366,7 +359,6 @@ teardown: "index": { "index": { "index" : "index_not_allowed_to_read", - "doc_type" : "type", "doc_id": "my-id" } } @@ -390,7 +382,6 @@ teardown: - do: get: index: index_not_allowed_to_read - type: type id: 1 catch: forbidden diff --git a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml index 08097e0cace17..69fd7b4d575ee 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml +++ b/x-pack/qa/smoke-test-watcher/src/test/resources/rest-api-spec/test/painless/60_chain_input_with_transform.yml @@ -34,7 +34,6 @@ "index" : { "index" : { "index" : "my-index", - 
"doc_type" : "my-type", "doc_id" : "my-id" } } @@ -47,7 +46,6 @@ - do: get: index: my-index - type: my-type id: my-id - match: { _source.first.foo: "bar" } From 6325e55dbfc494f2cb1e33411c91f60661136851 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 28 Jan 2019 11:56:36 -0800 Subject: [PATCH 37/57] Use quotes in reproduce line task for vagrant failure (#37884) This commit wraps the packaging test task for reproducing a vagrant test failure in quotes. The task names sometimes contain "#", which confuses bash. --- .../org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index fec6b2eab7405..9f5364b78a896 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -630,7 +630,7 @@ class VagrantTestPlugin implements Plugin { void afterExecute(Task task, TaskState state) { final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew" if (state.failure != null) { - println "REPRODUCE WITH: ${gradlew} ${reproTaskPath} -Dtests.seed=${project.testSeed} " + println "REPRODUCE WITH: ${gradlew} \"${reproTaskPath}\" -Dtests.seed=${project.testSeed} " } } } From 557fcf915e987ac2afda8b7b79abbe00de2a2b25 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 28 Jan 2019 15:25:56 -0500 Subject: [PATCH 38/57] Wait for mapping in testReadRequestsReturnLatestMappingVersion (#37886) If the index request is executed before the mapping update is applied on the IndexShard, the index request will perform a dynamic mapping update. This mapping update will be timeout (i.e, ProcessClusterEventTimeoutException) because the latch is not open. This leads to the failure of the index request and the test. This commit makes sure the mapping is ready before we execute the index request. 
Closes #37807 --- .../xpack/ccr/FollowerFailOverIT.java | 49 ++++++++++++------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java index b49e7c9ced7ce..73cc94b4703a9 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowerFailOverIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; @@ -230,8 +231,7 @@ public void testAddNewReplicasOnFollower() throws Exception { pauseFollow("follower-index"); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37807") - public void testReadRequestsReturnsLatestMappingVersion() throws Exception { + public void testReadRequestsReturnLatestMappingVersion() throws Exception { InternalTestCluster leaderCluster = getLeaderCluster(); Settings nodeAttributes = Settings.builder().put("node.attr.box", "large").build(); String dataNode = leaderCluster.startDataOnlyNode(nodeAttributes); @@ -244,6 +244,9 @@ public void testReadRequestsReturnsLatestMappingVersion() throws Exception { .put("index.routing.allocation.require.box", "large")) .get() ); + getFollowerCluster().startDataOnlyNode(nodeAttributes); + followerClient().execute(PutFollowAction.INSTANCE, putFollow("leader-index", "follower-index")).get(); + ensureFollowerGreen("follower-index"); ClusterService clusterService = leaderCluster.clusterService(dataNode); ShardId shardId = clusterService.state().routingTable().index("leader-index").shard(0).shardId(); IndicesService indicesService = leaderCluster.getInstance(IndicesService.class, dataNode); @@ -265,22 +268,30 @@ public void testReadRequestsReturnsLatestMappingVersion() throws Exception { }); leaderCluster.client().admin().indices().preparePutMapping().setType("doc") .setSource("balance", "type=long").setTimeout(TimeValue.ZERO).get(); - IndexResponse indexResp = leaderCluster.client(dataNode).prepareIndex("leader-index", "doc", "1") - .setSource("{\"balance\": 100}", XContentType.JSON).setTimeout(TimeValue.ZERO).get(); - assertThat(indexResp.getResult(), equalTo(DocWriteResponse.Result.CREATED)); - assertThat(indexShard.getGlobalCheckpoint(), equalTo(0L)); - getFollowerCluster().startDataOnlyNode(nodeAttributes); - followerClient().execute(PutFollowAction.INSTANCE, putFollow("leader-index", "follower-index")).get(); - ensureFollowerGreen("follower-index"); - // Make sure at least one read-request which requires mapping sync is completed. 
- assertBusy(() -> { - CcrClient ccrClient = new CcrClient(followerClient()); - FollowStatsAction.StatsResponses responses = ccrClient.followStats(new FollowStatsAction.StatsRequest()).actionGet(); - long bytesRead = responses.getStatsResponses().stream().mapToLong(r -> r.status().bytesRead()).sum(); - assertThat(bytesRead, Matchers.greaterThan(0L)); - }, 60, TimeUnit.SECONDS); - latch.countDown(); - assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); - pauseFollow("follower-index"); + try { + // Make sure the mapping is ready on the shard before we execute the index request; otherwise the index request + // will perform a dynamic mapping update, which will be blocked because the latch remains closed. + assertBusy(() -> { + DocumentMapper mapper = indexShard.mapperService().documentMapper("doc"); + assertNotNull(mapper); + assertNotNull(mapper.mappers().getMapper("balance")); + }); + IndexResponse indexResp = leaderCluster.client().prepareIndex("leader-index", "doc", "1") + .setSource("{\"balance\": 100}", XContentType.JSON).setTimeout(TimeValue.ZERO).get(); + assertThat(indexResp.getResult(), equalTo(DocWriteResponse.Result.CREATED)); + assertThat(indexShard.getGlobalCheckpoint(), equalTo(0L)); + // Make sure at least one read-request which requires mapping sync is completed. + assertBusy(() -> { + CcrClient ccrClient = new CcrClient(followerClient()); + FollowStatsAction.StatsResponses responses = ccrClient.followStats(new FollowStatsAction.StatsRequest()).actionGet(); + long bytesRead = responses.getStatsResponses().stream().mapToLong(r -> r.status().bytesRead()).sum(); + assertThat(bytesRead, Matchers.greaterThan(0L)); + }, 60, TimeUnit.SECONDS); + latch.countDown(); + assertIndexFullyReplicatedToFollower("leader-index", "follower-index"); + } finally { + latch.countDown(); // no effect if the latch was already counted down - this makes sure teardown can make progress. + pauseFollow("follower-index"); + } } } From 19529da2db2c845a344b637301cd150e7cd7f656 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 28 Jan 2019 13:04:38 -0800 Subject: [PATCH 39/57] [DOCS] Delayed data annotations (#37939) --- .../ml/delayed-data-detection.asciidoc | 59 +++++++++++-------- 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/docs/reference/ml/delayed-data-detection.asciidoc b/docs/reference/ml/delayed-data-detection.asciidoc index 2c2179205c554..872a45d724893 100644 --- a/docs/reference/ml/delayed-data-detection.asciidoc +++ b/docs/reference/ml/delayed-data-detection.asciidoc @@ -5,38 +5,49 @@ Delayed data are documents that are indexed late. That is to say, it is data related to a time that the {dfeed} has already processed. -When you create a datafeed, you can specify a {ref}/ml-datafeed-resource.html[`query_delay`] setting. -This setting enables the datafeed to wait for some time past real-time, which means any "late" data in this period -is fully indexed before the datafeed tries to gather it. However, if the setting is set too low, the datafeed may query -for data before it has been indexed and consequently miss that document. Conversely, if it is set too high, -analysis drifts farther away from real-time. The balance that is struck depends upon each use case and -the environmental factors of the cluster. +When you create a datafeed, you can specify a +{ref}/ml-datafeed-resource.html[`query_delay`] setting.
This setting enables the +datafeed to wait for some time past real-time, which means any "late" data in +this period is fully indexed before the datafeed tries to gather it. However, if +the setting is set too low, the datafeed may query for data before it has been +indexed and consequently miss that document. Conversely, if it is set too high, +analysis drifts farther away from real-time. The balance that is struck depends +upon each use case and the environmental factors of the cluster. ==== Why worry about delayed data? -This is a particularly prescient question. If data are delayed randomly (and consequently missing from analysis), -the results of certain types of functions are not really affected. It all comes out ok in the end -as the delayed data is distributed randomly. An example would be a `mean` metric for a field in a large collection of data. -In this case, checking for delayed data may not provide much benefit. If data are consistently delayed, however, jobs with a `low_count` function may -provide false positives. In this situation, it would be useful to see if data -comes in after an anomaly is recorded so that you can determine a next course of action. +This is a particularly prescient question. If data are delayed randomly (and +consequently are missing from analysis), the results of certain types of +functions are not really affected. In these situations, it all comes out okay in +the end as the delayed data is distributed randomly. An example would be a `mean` +metric for a field in a large collection of data. In this case, checking for +delayed data may not provide much benefit. If data are consistently delayed, +however, jobs with a `low_count` function may provide false positives. In this +situation, it would be useful to see if data comes in after an anomaly is +recorded so that you can determine a next course of action. ==== How do we detect delayed data? In addition to the `query_delay` field, there is a -{ref}/ml-datafeed-resource.html#ml-datafeed-delayed-data-check-config[delayed data check config], which enables you to -configure the datafeed to look in the past for delayed data. Every 15 minutes or every `check_window`, -whichever is smaller, the datafeed triggers a document search over the configured indices. This search looks over a -time span with a length of `check_window` ending with the latest finalized bucket. That time span is partitioned into buckets, -whose length equals the bucket span of the associated job. The `doc_count` of those buckets are then compared with the -job's finalized analysis buckets to see whether any data has arrived since the analysis. If there is indeed missing data -due to their ingest delay, the end user is notified. +{ref}/ml-datafeed-resource.html#ml-datafeed-delayed-data-check-config[delayed data check config], +which enables you to configure the datafeed to look in the past for delayed data. +Every 15 minutes or every `check_window`, whichever is smaller, the datafeed +triggers a document search over the configured indices. This search looks over a +time span with a length of `check_window` ending with the latest finalized bucket. +That time span is partitioned into buckets, whose length equals the bucket span +of the associated job. The `doc_count` values of those buckets are then compared with +the job's finalized analysis buckets to see whether any data has arrived since +the analysis. If there is indeed missing data due to ingest delay, the end user is notified.
For example, you can see annotations in {kib} for the periods +where these delays occur. ==== What to do about delayed data? -The most common course of action is to simply to do nothing. For many functions and situations ignoring the data is -acceptable. However, if the amount of delayed data is too great or the situation calls for it, the next course -of action to consider is to increase the `query_delay` of the datafeed. This increased delay allows more time for data to be -indexed. If you have real-time constraints, however, an increased delay might not be desirable. -In which case, you would have to {ref}/tune-for-indexing-speed.html[tune for better indexing speed.] +The most common course of action is simply to do nothing. For many functions +and situations, ignoring the data is acceptable. However, if the amount of +delayed data is too great or the situation calls for it, the next course of +action to consider is to increase the `query_delay` of the datafeed. This +increased delay allows more time for data to be indexed. If you have real-time +constraints, however, an increased delay might not be desirable. In which case, +you would have to {ref}/tune-for-indexing-speed.html[tune for better indexing speed]. From 49bd8715ff63584b685c583644d5a5291f509edf Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Mon, 28 Jan 2019 14:09:12 -0700 Subject: [PATCH 40/57] Inject Unfollow before Rollover and Shrink (#37625) We inject an Unfollow action before Shrink because the Shrink action cannot be safely used on a following index, as it may not be fully caught up with the leader index before the "original" following index is deleted and replaced with a non-following Shrunken index. The Unfollow action will verify that 1) the index is marked as "complete", and 2) all operations up to this point have been replicated from the leader to the follower before explicitly disconnecting the follower from the leader. Injecting an Unfollow action before the Rollover action is done mainly as a convenience: This allows users to use the same lifecycle policy on both the leader and follower cluster without having to explicitly modify the policy to unfollow the index, while doing what we expect users to want in most cases. --- client/rest-high-level/build.gradle | 1 + .../client/IndexLifecycleIT.java | 52 ++++---- .../documentation/ILMDocumentationIT.java | 105 ++++++++-------- .../reference/ilm/policy-definitions.asciidoc | 30 +++++ .../ilm/using-policies-rollover.asciidoc | 1 + .../core/indexlifecycle/ReadOnlyAction.java | 1 - .../TimeseriesLifecycleType.java | 11 +- .../TimeseriesLifecycleTypeTests.java | 26 ++++ .../indexlifecycle/CCRIndexLifecycleIT.java | 113 +++++++++++++++++- .../TimeSeriesLifecycleActionsIT.java | 36 ++++++ 10 files changed, 297 insertions(+), 79 deletions(-) diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index b71ca82c7d094..22e6252892a7d 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -107,6 +107,7 @@ integTestCluster { // Truststore settings are not used since TLS is not enabled.
Included for testing the get certificates API setting 'xpack.security.http.ssl.certificate_authorities', 'testnode.crt' setting 'xpack.security.transport.ssl.truststore.path', 'testnode.jks' + setting 'indices.lifecycle.poll_interval', '1000ms' keystoreSetting 'xpack.security.transport.ssl.truststore.secure_password', 'testnode' setupCommand 'setupDummyUser', 'bin/elasticsearch-users', diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java index 4ad6d2e6ce604..f74ba00436c63 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -184,31 +184,37 @@ public void testExplainLifecycle() throws Exception { createIndex("squash", Settings.EMPTY); - ExplainLifecycleRequest req = new ExplainLifecycleRequest("foo-01", "baz-01", "squash"); - ExplainLifecycleResponse response = execute(req, highLevelClient().indexLifecycle()::explainLifecycle, + // The injected Unfollow step will run pretty rapidly here, so we need + // to wait for it to settle into the "stable" step of waiting to be + // ready to roll over + assertBusy(() -> { + ExplainLifecycleRequest req = new ExplainLifecycleRequest("foo-01", "baz-01", "squash"); + ExplainLifecycleResponse response = execute(req, highLevelClient().indexLifecycle()::explainLifecycle, highLevelClient().indexLifecycle()::explainLifecycleAsync); - Map indexResponses = response.getIndexResponses(); - assertEquals(3, indexResponses.size()); - IndexLifecycleExplainResponse fooResponse = indexResponses.get("foo-01"); - assertNotNull(fooResponse); - assertTrue(fooResponse.managedByILM()); - assertEquals("foo-01", fooResponse.getIndex()); - assertEquals("hot", fooResponse.getPhase()); - assertEquals("rollover", fooResponse.getAction()); - assertEquals("check-rollover-ready", fooResponse.getStep()); - assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), + Map indexResponses = response.getIndexResponses(); + assertEquals(3, indexResponses.size()); + IndexLifecycleExplainResponse fooResponse = indexResponses.get("foo-01"); + assertNotNull(fooResponse); + assertTrue(fooResponse.managedByILM()); + assertEquals("foo-01", fooResponse.getIndex()); + assertEquals("hot", fooResponse.getPhase()); + assertEquals("rollover", fooResponse.getAction()); + assertEquals("check-rollover-ready", fooResponse.getStep()); + assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), 1L, expectedPolicyModifiedDate), fooResponse.getPhaseExecutionInfo()); - IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); - assertNotNull(bazResponse); - assertTrue(bazResponse.managedByILM()); - assertEquals("baz-01", bazResponse.getIndex()); - assertEquals("hot", bazResponse.getPhase()); - assertEquals("rollover", bazResponse.getAction()); - assertEquals("check-rollover-ready", bazResponse.getStep()); - IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); - assertNotNull(squashResponse); - assertFalse(squashResponse.managedByILM()); - assertEquals("squash", squashResponse.getIndex()); + IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); + assertNotNull(bazResponse); + assertTrue(bazResponse.managedByILM()); + assertEquals("baz-01", bazResponse.getIndex()); + 
assertEquals("hot", bazResponse.getPhase()); + assertEquals("rollover", bazResponse.getAction()); + assertEquals("check-rollover-ready", bazResponse.getStep()); + IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); + assertNotNull(squashResponse); + assertFalse(squashResponse.managedByILM()); + assertEquals("squash", squashResponse.getIndex()); + + }); } public void testDeleteLifecycle() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index bec71fe1e44ce..5ccb0c8393304 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -22,6 +22,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -38,17 +39,17 @@ import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; -import org.elasticsearch.client.indexlifecycle.OperationMode; import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.client.indexlifecycle.OperationMode; import org.elasticsearch.client.indexlifecycle.Phase; import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; import org.elasticsearch.client.indexlifecycle.StartILMRequest; import org.elasticsearch.client.indexlifecycle.StopILMRequest; -import org.elasticsearch.client.indexlifecycle.ShrinkAction; import org.elasticsearch.client.indices.CreateIndexRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -337,11 +338,13 @@ public void testExplainLifecycle() throws Exception { new PutLifecyclePolicyRequest(policy); client.indexLifecycle().putLifecyclePolicy(putRequest, RequestOptions.DEFAULT); - CreateIndexRequest createIndexRequest = new CreateIndexRequest("my_index") + CreateIndexRequest createIndexRequest = new CreateIndexRequest("my_index-1") .settings(Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .put("index.lifecycle.name", "my_policy") + .put("index.lifecycle.rollover_alias", "my_alias") .build()); + createIndexRequest.alias(new Alias("my_alias").writeIndex(true)); client.indices().create(createIndexRequest, RequestOptions.DEFAULT); CreateIndexRequest createOtherIndexRequest = new CreateIndexRequest("other_index") .settings(Settings.builder() @@ -352,58 +355,62 @@ public void testExplainLifecycle() throws Exception { // wait for the policy to become active assertBusy(() -> assertNotNull(client.indexLifecycle() - .explainLifecycle(new 
ExplainLifecycleRequest("my_index"), RequestOptions.DEFAULT) - .getIndexResponses().get("my_index").getAction())); + .explainLifecycle(new ExplainLifecycleRequest("my_index-1"), RequestOptions.DEFAULT) + .getIndexResponses().get("my_index-1").getAction())); } // tag::ilm-explain-lifecycle-request ExplainLifecycleRequest request = - new ExplainLifecycleRequest("my_index", "other_index"); // <1> + new ExplainLifecycleRequest("my_index-1", "other_index"); // <1> // end::ilm-explain-lifecycle-request - // tag::ilm-explain-lifecycle-execute - ExplainLifecycleResponse response = client.indexLifecycle() - .explainLifecycle(request, RequestOptions.DEFAULT); - // end::ilm-explain-lifecycle-execute - assertNotNull(response); - - // tag::ilm-explain-lifecycle-response - Map indices = - response.getIndexResponses(); - IndexLifecycleExplainResponse myIndex = indices.get("my_index"); - String policyName = myIndex.getPolicyName(); // <1> - boolean isManaged = myIndex.managedByILM(); // <2> - - String phase = myIndex.getPhase(); // <3> - long phaseTime = myIndex.getPhaseTime(); // <4> - String action = myIndex.getAction(); // <5> - long actionTime = myIndex.getActionTime(); - String step = myIndex.getStep(); // <6> - long stepTime = myIndex.getStepTime(); - - String failedStep = myIndex.getFailedStep(); // <7> - // end::ilm-explain-lifecycle-response - assertEquals("my_policy", policyName); - assertTrue(isManaged); - - assertEquals("hot", phase); - assertNotEquals(0, phaseTime); - assertEquals("rollover", action); - assertNotEquals(0, actionTime); - assertEquals("check-rollover-ready", step); - assertNotEquals(0, stepTime); - - assertNull(failedStep); - - IndexLifecycleExplainResponse otherIndex = indices.get("other_index"); - assertFalse(otherIndex.managedByILM()); - assertNull(otherIndex.getPolicyName()); - assertNull(otherIndex.getPhase()); - assertNull(otherIndex.getAction()); - assertNull(otherIndex.getStep()); - assertNull(otherIndex.getFailedStep()); - assertNull(otherIndex.getPhaseExecutionInfo()); - assertNull(otherIndex.getStepInfo()); + + assertBusy(() -> { + // tag::ilm-explain-lifecycle-execute + ExplainLifecycleResponse response = client.indexLifecycle() + .explainLifecycle(request, RequestOptions.DEFAULT); + // end::ilm-explain-lifecycle-execute + assertNotNull(response); + + // tag::ilm-explain-lifecycle-response + Map indices = + response.getIndexResponses(); + IndexLifecycleExplainResponse myIndex = indices.get("my_index-1"); + String policyName = myIndex.getPolicyName(); // <1> + boolean isManaged = myIndex.managedByILM(); // <2> + + String phase = myIndex.getPhase(); // <3> + long phaseTime = myIndex.getPhaseTime(); // <4> + String action = myIndex.getAction(); // <5> + long actionTime = myIndex.getActionTime(); + String step = myIndex.getStep(); // <6> + long stepTime = myIndex.getStepTime(); + + String failedStep = myIndex.getFailedStep(); // <7> + // end::ilm-explain-lifecycle-response + + assertEquals("my_policy", policyName); + assertTrue(isManaged); + + assertEquals("hot", phase); + assertNotEquals(0, phaseTime); + assertEquals("rollover", action); + assertNotEquals(0, actionTime); + assertEquals("check-rollover-ready", step); + assertNotEquals(0, stepTime); + + assertNull(failedStep); + + IndexLifecycleExplainResponse otherIndex = indices.get("other_index"); + assertFalse(otherIndex.managedByILM()); + assertNull(otherIndex.getPolicyName()); + assertNull(otherIndex.getPhase()); + assertNull(otherIndex.getAction()); + assertNull(otherIndex.getStep()); + 
assertNull(otherIndex.getFailedStep()); + assertNull(otherIndex.getPhaseExecutionInfo()); + assertNull(otherIndex.getStepInfo()); + }); // tag::ilm-explain-lifecycle-execute-listener ActionListener listener = diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 881b58826b031..e16b414504a64 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -353,6 +353,13 @@ index format must match pattern '^.*-\\d+$', for example (`logs-000001`). The managed index must set `index.lifecycle.rollover_alias` as the alias to rollover. The index must also be the write index for the alias. +[IMPORTANT] +If a policy using the Rollover action is used on a <>, policy execution will wait until the leader index rolls over (or has +<>), then convert the +follower index into a regular index as if <> had been used instead of rolling over. + For example, if an index to be managed has an alias `my_data`, the managed index "my_index" must be the write index for the alias. For more information, read <>. @@ -578,6 +585,13 @@ PUT _ilm/policy/my_policy NOTE: Index will be made read-only when this action is run (see: <>) +[IMPORTANT] +If a policy using the Shrink action is used on a <>, policy execution will wait until the leader index rolls over (or has +<>), then convert the +follower index into a regular index as if <> had been used before shrink is applied, as shrink cannot be safely +applied to follower indices. This action shrinks an existing index into a new index with fewer primary shards. It calls the <> to shrink the index. @@ -622,11 +636,27 @@ PUT _ilm/policy/my_policy [[ilm-unfollow-action]] ==== Unfollow +[IMPORTANT] +This action may be used explicitly, as shown below, but this action is also run +before <> and <> as described in the documentation for those actions. + This action turns a {ref}/ccr-apis.html[ccr] follower index into a regular index. This can be desired when moving follower indices into the next phase. Also certain actions like shrink and rollover can then be performed safely on follower indices. +This action will wait until it is safe to convert a follower index into a +regular index. In particular, the following conditions must be met: + +* The leader index must have `index.lifecycle.indexing_complete` set to `true`. +This happens automatically if the leader index is rolled over using +<>, or may be set manually using +the <>. +* All operations performed on the leader index must have been replicated to the +follower index. This ensures that no operations will be lost when the index is +converted into a regular index. + If the unfollow action encounters a follower index then the following operations will be performed on it: diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc index 266346fb8629f..dbabbd3333635 100644 --- a/docs/reference/ilm/using-policies-rollover.asciidoc +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -123,6 +123,7 @@ When the rollover is performed, the newly-created index is set as the write index for the rolled over alias. Documents sent to the alias are indexed into the new index, enabling indexing to continue uninterrupted.
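[Editor's note] The unfollow preconditions above center on the `index.lifecycle.indexing_complete` setting. As a concrete illustration, the following sketch sets that flag manually through the Java high-level REST client. This is an editor-supplied example, not part of the patch; the `client` variable is assumed to be an initialized `RestHighLevelClient`, and the index name `my_index-000001` is hypothetical.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.settings.Settings;

public class IndexingCompleteExample {
    /**
     * Marks indexing on the index as complete so that ILM skips rollover
     * and the unfollow action's first precondition holds.
     */
    public static void markIndexingComplete(RestHighLevelClient client) throws IOException {
        UpdateSettingsRequest request = new UpdateSettingsRequest("my_index-000001") // hypothetical index
            .settings(Settings.builder()
                .put("index.lifecycle.indexing_complete", true)
                .build());
        client.indices().putSettings(request, RequestOptions.DEFAULT);
    }
}
--------------------------------------------------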
+[[skipping-rollover]] === Skipping Rollover The `index.lifecycle.indexing_complete` setting indicates to {ilm} whether this diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java index 0e6486eecb7b0..e338d75a98f82 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/ReadOnlyAction.java @@ -25,7 +25,6 @@ */ public class ReadOnlyAction implements LifecycleAction { public static final String NAME = "readonly"; - public static final ReadOnlyAction INSTANCE = new ReadOnlyAction(); private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java index 4d1c770cea4bc..a7f8f92e6b829 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleType.java @@ -6,14 +6,12 @@ package org.elasticsearch.xpack.core.indexlifecycle; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,8 +42,6 @@ public class TimeseriesLifecycleType implements LifecycleType { static final Set VALID_WARM_ACTIONS = Sets.newHashSet(ORDERED_VALID_WARM_ACTIONS); static final Set VALID_COLD_ACTIONS = Sets.newHashSet(ORDERED_VALID_COLD_ACTIONS); static final Set VALID_DELETE_ACTIONS = Sets.newHashSet(ORDERED_VALID_DELETE_ACTIONS); - private static final Phase EMPTY_WARM_PHASE = new Phase("warm", TimeValue.ZERO, - Collections.singletonMap("readonly", ReadOnlyAction.INSTANCE)); private static Map> ALLOWED_ACTIONS = new HashMap<>(); static { @@ -72,6 +68,13 @@ public List getOrderedPhases(Map phases) { for (String phaseName : VALID_PHASES) { Phase phase = phases.get(phaseName); if (phase != null) { + Map actions = phase.getActions(); + if (actions.containsKey(UnfollowAction.NAME) == false + && (actions.containsKey(RolloverAction.NAME) || actions.containsKey(ShrinkAction.NAME))) { + Map actionMap = new HashMap<>(phase.getActions()); + actionMap.put(UnfollowAction.NAME, new UnfollowAction()); + phase = new Phase(phase.getName(), phase.getMinimumAge(), actionMap); + } orderedPhases.add(phase); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java index 4efb34873d471..ca1614e0bbe62 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/TimeseriesLifecycleTypeTests.java @@ -152,6 +152,32 @@ public void testGetOrderedPhases() { 
assertTrue(isSorted(TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap), Phase::getName, VALID_PHASES)); } + public void testUnfollowInjections() { + assertTrue(isUnfollowInjected("hot", RolloverAction.NAME)); + assertTrue(isUnfollowInjected("warm", ShrinkAction.NAME)); + + assertFalse(isUnfollowInjected("hot", SetPriorityAction.NAME)); + assertFalse(isUnfollowInjected("warm", SetPriorityAction.NAME)); + assertFalse(isUnfollowInjected("warm", AllocateAction.NAME)); + assertFalse(isUnfollowInjected("warm", ReadOnlyAction.NAME)); + assertFalse(isUnfollowInjected("warm", ForceMergeAction.NAME)); + assertFalse(isUnfollowInjected("cold", SetPriorityAction.NAME)); + assertFalse(isUnfollowInjected("cold", AllocateAction.NAME)); + assertFalse(isUnfollowInjected("cold", FreezeAction.NAME)); + assertFalse(isUnfollowInjected("delete", DeleteAction.NAME)); + + } + + private boolean isUnfollowInjected(String phaseName, String actionName) { + Map phaseMap = new HashMap<>(); + Map actionsMap = new HashMap<>(); + actionsMap.put(actionName, getTestAction(actionName)); + Phase warmPhase = new Phase(phaseName, TimeValue.ZERO, actionsMap); + phaseMap.put(phaseName, warmPhase); + List phases = TimeseriesLifecycleType.INSTANCE.getOrderedPhases(phaseMap); + Phase processedWarmPhase = phases.stream().filter(phase -> phase.getName().equals(phaseName)).findFirst().get(); + return processedWarmPhase.getActions().containsKey("unfollow"); + } public void testGetOrderedActionsInvalidPhase() { IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> TimeseriesLifecycleType.INSTANCE diff --git a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java index 9dbf32b376565..65baeb5f168c4 100644 --- a/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/indexlifecycle/CCRIndexLifecycleIT.java @@ -274,6 +274,97 @@ public void testCcrAndIlmWithRollover() throws Exception { } } + public void testUnfollowInjectedBeforeShrink() throws Exception { + final String indexName = "shrink-test"; + final String shrunkenIndexName = "shrink-" + indexName; + final String policyName = "shrink-test-policy"; + + if ("leader".equals(targetCluster)) { + Settings indexSettings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .put("index.number_of_shards", 3) + .put("index.number_of_replicas", 0) + .put("index.lifecycle.name", policyName) // this policy won't exist on the leader, that's fine + .build(); + createIndex(indexName, indexSettings, "", ""); + ensureGreen(indexName); + } else if ("follow".equals(targetCluster)) { + // Create a policy with just a Shrink action on the follower + final XContentBuilder builder = jsonBuilder(); + builder.startObject(); + { + builder.startObject("policy"); + { + builder.startObject("phases"); + { + builder.startObject("warm"); + { + builder.startObject("actions"); + { + builder.startObject("shrink"); + { + builder.field("number_of_shards", 1); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + // Sometimes throw in an extraneous unfollow just to check it doesn't break anything + if (randomBoolean()) { + builder.startObject("cold"); + { + builder.startObject("actions"); + { + builder.startObject("unfollow"); + 
builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + + final Request request = new Request("PUT", "_ilm/policy/" + policyName); + request.setJsonEntity(Strings.toString(builder)); + assertOK(client().performRequest(request)); + + // Follow the index + followIndex(indexName, indexName); + // Make sure it actually took + assertBusy(() -> assertTrue(indexExists(indexName))); + // This should now be in the "warm" phase waiting for the index to be ready to unfollow + assertBusy(() -> assertILMPolicy(client(), indexName, policyName, "warm", "unfollow", "wait-for-indexing-complete")); + + // Set the indexing_complete flag on the leader so the index will actually unfollow + try (RestClient leaderClient = buildLeaderClient()) { + updateIndexSettings(leaderClient, indexName, Settings.builder() + .put("index.lifecycle.indexing_complete", true) + .build() + ); + } + + // Wait for the setting to get replicated + assertBusy(() -> assertThat(getIndexSetting(client(), indexName, "index.lifecycle.indexing_complete"), equalTo("true"))); + + // We can't reliably check that the index is unfollowed, because ILM + // moves through the unfollow and shrink actions so fast that the + // index often disappears between assertBusy checks + + // Wait for the index to continue with its lifecycle and be shrunk + assertBusy(() -> assertTrue(indexExists(shrunkenIndexName))); + + // Wait for the index to complete its policy + assertBusy(() -> assertILMPolicy(client(), shrunkenIndexName, policyName, "completed", "completed", "completed")); + } + } + private static void putILMPolicy(String name, String maxSize, Integer maxDocs, TimeValue maxAge) throws IOException { final Request request = new Request("PUT", "_ilm/policy/" + name); XContentBuilder builder = jsonBuilder(); @@ -299,7 +390,7 @@ private static void putILMPolicy(String name, String maxSize, Integer maxDocs, T } builder.endObject(); } - { + if (randomBoolean()) { builder.startObject("unfollow"); builder.endObject(); } @@ -310,6 +401,11 @@ private static void putILMPolicy(String name, String maxSize, Integer maxDocs, T { builder.startObject("actions"); { + // Sometimes throw in an extraneous unfollow just to check it doesn't break anything + if (randomBoolean()) { + builder.startObject("unfollow"); + builder.endObject(); + } builder.startObject("readonly"); builder.endObject(); } @@ -338,13 +434,26 @@ private static void putILMPolicy(String name, String maxSize, Integer maxDocs, T } private static void assertILMPolicy(RestClient client, String index, String policy, String expectedPhase) throws IOException { + assertILMPolicy(client, index, policy, expectedPhase, null, null); + } + + private static void assertILMPolicy(RestClient client, String index, String policy, String expectedPhase, + String expectedAction, String expectedStep) throws IOException { final Request request = new Request("GET", "/" + index + "/_ilm/explain"); Map response = toMap(client.performRequest(request)); LOGGER.info("response={}", response); Map explanation = (Map) ((Map) response.get("indices")).get(index); assertThat(explanation.get("managed"), is(true)); assertThat(explanation.get("policy"), equalTo(policy)); - assertThat(explanation.get("phase"), equalTo(expectedPhase)); + if (expectedPhase != null) { + assertThat(explanation.get("phase"), equalTo(expectedPhase)); + } + if (expectedAction != null) { + assertThat(explanation.get("action"), equalTo(expectedAction)); + } + if 
(expectedStep != null) { + assertThat(explanation.get("step"), equalTo(expectedStep)); + } } private static void updateIndexSettings(RestClient client, String index, Settings settings) throws IOException { diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index 878ddec5e5fe9..675c24a4195b7 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -759,6 +759,42 @@ public void testRemoveAndReaddPolicy() throws Exception { assertBusy(() -> assertThat(getStepKeyForIndex(originalIndex), equalTo(TerminalPolicyStep.KEY))); } + public void testMoveToInjectedStep() throws Exception { + String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index; + createNewSingletonPolicy("warm", new ShrinkAction(1), TimeValue.timeValueHours(12)); + + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 3) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy) + .put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias")); + + assertBusy(() -> assertThat(getStepKeyForIndex(index), equalTo(new StepKey("new", "complete", "complete")))); + + // Move to a step from the injected unfollow action + Request moveToStepRequest = new Request("POST", "_ilm/move/" + index); + moveToStepRequest.setJsonEntity("{\n" + + " \"current_step\": { \n" + + " \"phase\": \"new\",\n" + + " \"action\": \"complete\",\n" + + " \"name\": \"complete\"\n" + + " },\n" + + " \"next_step\": { \n" + + " \"phase\": \"warm\",\n" + + " \"action\": \"unfollow\",\n" + + " \"name\": \"wait-for-indexing-complete\"\n" + + " }\n" + + "}"); + // If we get an OK on this request we have successfully moved to the injected step + assertOK(client().performRequest(moveToStepRequest)); + + // Make sure we actually move on to and execute the shrink action + assertBusy(() -> { + assertTrue(indexExists(shrunkenIndex)); + assertTrue(aliasExists(shrunkenIndex, index)); + assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); + }); + } + private void createFullPolicy(TimeValue hotTime) throws IOException { Map hotActions = new HashMap<>(); hotActions.put(SetPriorityAction.NAME, new SetPriorityAction(100)); From 09b6028e153948e90c9f19d64a1de6aee5d68464 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 28 Jan 2019 15:57:27 -0800 Subject: [PATCH 41/57] Add painless context examples for update and update-by-query (#37943) This commit improves the example docs for contexts in painless. 
relates #34829 --- docs/build.gradle | 22 ++++++---- .../painless-update-by-query-context.asciidoc | 43 ++++++++++++++++++- .../painless-update-context.asciidoc | 26 ++++++++++- 3 files changed, 81 insertions(+), 10 deletions(-) diff --git a/docs/build.gradle b/docs/build.gradle index ada204949febd..e7112c08ac41e 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1145,17 +1145,23 @@ buildRestTests.setups['seats'] = ''' type: keyword cost: type: long + row: + type: long + number: + type: long + sold: + type: boolean - do: bulk: index: seats type: _doc refresh: true body: | - {"index":{}} - {"theatre": "Skyline", "cost": 1} - {"index":{}} - {"theatre": "Graye", "cost": 5} - {"index":{}} - {"theatre": "Graye", "cost": 8} - {"index":{}} - {"theatre": "Skyline", "cost": 10}''' + {"index":{"_id": "1"}} + {"theatre": "Skyline", "cost": 37, "row": 1, "number": 7, "sold": false} + {"index":{"_id": "2"}} + {"theatre": "Graye", "cost": 30, "row": 3, "number": 5, "sold": false} + {"index":{"_id": "3"}} + {"theatre": "Graye", "cost": 33, "row": 2, "number": 6, "sold": false} + {"index":{"_id": "4"}} + {"theatre": "Skyline", "cost": 20, "row": 5, "number": 2, "sold": false}''' diff --git a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc index d8d7754807496..ba42105f2e901 100644 --- a/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-by-query-context.asciidoc @@ -51,4 +51,45 @@ result of query. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. + +*Example* + +To run this example, first follow the steps in +<>. + +The following query finds all seats in a specific section that have not been +sold and lowers the price by 2: + +[source,js] +-------------------------------------------------- +POST /seats/_update_by_query +{ + "query": { + "bool": { + "filter": [ + { + "range": { + "row": { + "lte": 3 + } + } + }, + { + "match": { + "sold": false + } + }] + } + }, + "script": { + "source": "ctx._source.cost -= params.discount", + "lang": "painless", + "params": { + "discount": 2 + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:seats] \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-update-context.asciidoc b/docs/painless/painless-contexts/painless-update-context.asciidoc index d1b4b84eb187a..6ed8c2f7c13a3 100644 --- a/docs/painless/painless-contexts/painless-update-context.asciidoc +++ b/docs/painless/painless-contexts/painless-update-context.asciidoc @@ -52,4 +52,28 @@ add, modify, or delete fields within a single document. *API* -The standard <> is available. \ No newline at end of file +The standard <> is available. + +*Example* + +To run this example, first follow the steps in +<>. 
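[Editor's note] The console snippet below can also be expressed through the Java high-level REST client. The following sketch is an editor-supplied illustration, not part of the patch: it assumes an initialized `RestHighLevelClient` named `client`, and mirrors the document id and `sold_cost` parameter of the console example that follows.

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.util.Collections;

import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

public class SeatUpdateExample {
    /** Marks seat 3 as sold and records the discounted price actually paid. */
    public static void markSeatSold(RestHighLevelClient client) throws IOException {
        Script script = new Script(ScriptType.INLINE, "painless",
            "ctx._source.sold = true; ctx._source.cost = params.sold_cost",
            Collections.singletonMap("sold_cost", 26));
        UpdateRequest request = new UpdateRequest("seats", "_doc", "3").script(script);
        client.update(request, RequestOptions.DEFAULT);
    }
}
--------------------------------------------------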
+ +The following query updates a document to be sold, and sets the cost +to the actual price paid after discounts: + +[source,js] +-------------------------------------------------- +POST /seats/_update/3 +{ + "script": { + "source": "ctx._source.sold = true; ctx._source.cost = params.sold_cost", + "lang": "painless", + "params": { + "sold_cost": 26 + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:seats] \ No newline at end of file From 6ed35fbb945b35a65b0c56dbd3143f006c69e447 Mon Sep 17 00:00:00 2001 From: Like Date: Tue, 29 Jan 2019 08:01:09 +0800 Subject: [PATCH 42/57] Support merge nested Map in list for JIRA configurations (#37634) This commit allows JIRA API fields that require a list of key/value pairs (maps), such as JIRA "components", to use template snippets (e.g. {{ctx.payload.foo}}). Prior to this change the templated value (not the de-referenced value) would be sent via the API and cause an error. Closes #30068 --- .../actions/jira/ExecutableJiraAction.java | 2 + .../jira/ExecutableJiraActionTests.java | 52 +++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java index 89f9af8e1d529..0a5832539f3b6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraAction.java @@ -88,6 +88,8 @@ static Map<String, Object> merge(final Map<String, Object> fields, final Map<String, ?> map, final Function<String, String> fn) { + } else if (v instanceof Map) { + newValues.add(merge(new HashMap<>(), (Map<String, ?>) v, fn)); } else { newValues.add(v); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java index 4806412aeaa60..afc3d4dfb5582 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/jira/ExecutableJiraActionTests.java @@ -27,9 +27,11 @@ import org.joda.time.DateTimeZone; import org.mockito.ArgumentCaptor; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.function.Function; @@ -311,4 +313,54 @@ public String render(TextTemplate textTemplate, Map model) { return textTemplate.getTemplate().toUpperCase(Locale.ROOT); } } + + public void testMerge() { + Map writeableMap = new HashMap<>(); + Map mergeNull = ExecutableJiraAction.merge(writeableMap, null, s -> s); + assertTrue(mergeNull.isEmpty()); + Map map = new HashMap<>(); + map.put("foo", "bar"); + map.put("list", Arrays.asList("test1", "test2")); + Map valueMap = new HashMap<>(); + valueMap.put("var", "abc"); + map.put("map", valueMap); + Map componentMap = new HashMap<>(); + componentMap.put("name", "value"); + List> list = new ArrayList<>(); + list.add(componentMap); + map.put("components", list); + Map result = ExecutableJiraAction.merge(writeableMap, map, s -> s.toUpperCase(Locale.ROOT)); + assertThat(result, hasEntry("FOO", "BAR")); + assertThat(result.get("LIST"), instanceOf(List.class)); + List mergedList = (List) result.get("LIST"); + assertEquals(2, mergedList.size()); + assertEquals("TEST1",
mergedList.get(0)); + assertEquals("TEST2", mergedList.get(1)); + Map mergedMap = (Map) result.get("MAP"); + assertEquals(1, mergedMap.size()); + assertEquals("ABC", mergedMap.get("VAR")); + assertThat(result.get("COMPONENTS"), instanceOf(List.class)); + List> components = (List>) result.get("COMPONENTS"); + assertThat(components.get(0), hasEntry("NAME", "VALUE")); + + // test that the fields are not overwritten + Map fields = new HashMap<>(); + fields.put("FOO", "bob"); + fields.put("LIST", Arrays.asList("test3")); + fields.put("MAP", new HashMap<>()); + fields.put("COMPONENTS", new ArrayList<>()); + + result = ExecutableJiraAction.merge(fields, map, s -> s.toUpperCase(Locale.ROOT)); + assertThat(result, hasEntry("FOO", "bob")); + assertThat(result.get("LIST"), instanceOf(List.class)); + mergedList = (List) result.get("LIST"); + assertEquals(1, mergedList.size()); + assertEquals("test3", mergedList.get(0)); + mergedMap = (Map) result.get("MAP"); + assertTrue(mergedMap.isEmpty()); + assertThat(result.get("COMPONENTS"), instanceOf(List.class)); + components = (List>) result.get("COMPONENTS"); + assertTrue(components.isEmpty()); + } + } From 891320f5ac5d92a89dbe90f050919d0bd9f671a0 Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 29 Jan 2019 07:20:09 +0100 Subject: [PATCH 43/57] Elasticsearch support to JSON logging (#36833) In order to support the JSON log format, a custom pattern layout was used and its configuration is enclosed in ESJsonLayout. Users are free to use their own patterns, but if smooth Beats integration is needed, they should use ESJsonLayout. EvilLoggerTests are left intact to make sure users' custom log patterns work fine. To populate the additional fields node.id and cluster.uuid, which are not available at start time, a cluster state update has to be received and the values passed to the log4j pattern converter. A ClusterStateObserver.Listener is used to receive only one ClusterStateUpdate. Once the update is received, the nodeId and clusterUuid are set in a static field in a NodeAndClusterIdConverter. The following fields are expected in JSON log lines: type, timestamp, level, component, cluster.name, node.name, node.id, cluster.uuid, message, stacktrace. See ESJsonLayout.java for more details and field descriptions. The Docker log4j2 configuration is now almost the same as the one used for the ES binary. The only difference is that Docker uses console appenders, whereas ES uses file appenders.
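[Editor's note] To make the converter mechanism described above concrete, here is a minimal, hypothetical sketch of a Log4j2 pattern converter in the spirit of NodeAndClusterIdConverter. It illustrates the technique only and is not the implementation added by this patch; the `node_and_cluster_id` converter key matches the one referenced from the log4j2 configuration later in this series.

import java.util.Locale;

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.pattern.ConverterKeys;
import org.apache.logging.log4j.core.pattern.LogEventPatternConverter;
import org.apache.logging.log4j.core.pattern.PatternConverter;

@Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter")
@ConverterKeys({"node_and_cluster_id"})
public final class NodeAndClusterIdConverterSketch extends LogEventPatternConverter {

    // Populated once, after the first cluster state update is observed.
    private static volatile String nodeAndClusterId;

    private NodeAndClusterIdConverterSketch() {
        super("NodeAndClusterId", "node_and_cluster_id");
    }

    // Required by Log4j2's pattern parser to instantiate the converter.
    public static NodeAndClusterIdConverterSketch newInstance(final String[] options) {
        return new NodeAndClusterIdConverterSketch();
    }

    // Called from a ClusterStateObserver.Listener when the first cluster state arrives.
    public static void setNodeIdAndClusterId(String nodeId, String clusterUuid) {
        nodeAndClusterId = String.format(Locale.ROOT,
            "\"node.id\": \"%s\", \"cluster.uuid\": \"%s\"", nodeId, clusterUuid);
    }

    @Override
    public void format(LogEvent event, StringBuilder toAppendTo) {
        // Until the first cluster state is received, the fields are simply omitted.
        if (nodeAndClusterId != null) {
            toAppendTo.append(nodeAndClusterId);
        }
    }
}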
relates: #32850 --- .../archives/integ-test-zip/build.gradle | 2 +- ...sIT.java => JsonLogsFormatAndParseIT.java} | 8 +- .../src/docker/config/log4j2.properties | 44 +++- distribution/src/config/log4j2.properties | 120 +++++++-- docs/reference/migration/migrate_7_0.asciidoc | 2 + .../migration/migrate_7_0/logging.asciidoc | 33 +++ docs/reference/setup/logging-config.asciidoc | 106 ++++++-- qa/die-with-dignity/build.gradle | 2 +- .../qa/die_with_dignity/DieWithDignityIT.java | 78 ++++-- qa/logging-config/build.gradle | 42 ++++ qa/logging-config/custom-log4j2.properties | 31 +++ .../common/logging/JsonLoggerTests.java | 232 ++++++++++++++++++ .../custom_logging/CustomLoggingConfigIT.java | 72 ++++++ .../logging/json_layout/log4j2.properties | 21 ++ .../src/test/resources/plugin-security.policy | 4 + qa/unconfigured-node-name/build.gradle | 2 +- ...sIT.java => JsonLogsFormatAndParseIT.java} | 6 +- .../resources/packaging/tests/60_systemd.bats | 2 +- .../test/resources/packaging/utils/utils.bash | 2 +- .../common/logging/ESJsonLayout.java | 118 +++++++++ .../JsonThrowablePatternConverter.java | 105 ++++++++ .../logging/NodeAndClusterIdConverter.java | 78 ++++++ .../NodeAndClusterIdStateListener.java | 77 ++++++ .../java/org/elasticsearch/node/Node.java | 8 +- .../JsonThrowablePatternConverterTests.java | 93 +++++++ .../common/logging/JsonLogLine.java | 158 ++++++++++++ .../common/logging/JsonLogsIntegTestCase.java | 129 ++++++++++ .../common/logging/JsonLogsStream.java | 97 ++++++++ .../logging/NodeNameInLogsIntegTestCase.java | 101 -------- .../downgrade-to-basic-license/build.gradle | 5 +- .../xpack/ccr/FollowIndexIT.java | 51 ++-- 31 files changed, 1624 insertions(+), 205 deletions(-) rename distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/{NodeNameInLogsIT.java => JsonLogsFormatAndParseIT.java} (88%) create mode 100644 docs/reference/migration/migrate_7_0/logging.asciidoc create mode 100644 qa/logging-config/build.gradle create mode 100644 qa/logging-config/custom-log4j2.properties create mode 100644 qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java create mode 100644 qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java create mode 100644 qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties create mode 100644 qa/logging-config/src/test/resources/plugin-security.policy rename qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/{NodeNameInLogsIT.java => JsonLogsFormatAndParseIT.java} (92%) create mode 100644 server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java create mode 100644 server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java create mode 100644 server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java create mode 100644 test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java create mode 100644 test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java delete mode 100644 
test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index 30fa4d3c03805..d79971907b50d 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -27,7 +27,7 @@ integTestRunner { */ if (System.getProperty("tests.rest.cluster") == null) { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } else { systemProperty 'tests.logfile', '--external--' } diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java similarity index 88% rename from distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java rename to distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java index a854e6e66462a..12c916946085b 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/NodeNameInLogsIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/JsonLogsFormatAndParseIT.java @@ -19,11 +19,11 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; +import org.elasticsearch.common.logging.JsonLogsIntegTestCase; import org.hamcrest.Matcher; -import java.io.IOException; import java.io.BufferedReader; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.is; -public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { +public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { @Override protected Matcher nodeNameMatcher() { return is("node-0"); @@ -41,7 +41,7 @@ protected Matcher nodeNameMatcher() { @Override protected BufferedReader openReader(Path logFile) { assumeFalse("Skipping test because it is being run against an external cluster.", - logFile.getFileName().toString().equals("--external--")); + logFile.getFileName().toString().equals("--external--")); return AccessController.doPrivileged((PrivilegedAction) () -> { try { return Files.newBufferedReader(logFile, StandardCharsets.UTF_8); diff --git a/distribution/docker/src/docker/config/log4j2.properties b/distribution/docker/src/docker/config/log4j2.properties index 9ad290ad82679..73420a047edc5 100644 --- a/distribution/docker/src/docker/config/log4j2.properties +++ b/distribution/docker/src/docker/config/log4j2.properties @@ -1,9 +1,43 @@ status = error -appender.console.type = Console -appender.console.name = console -appender.console.layout.type = PatternLayout -appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.rolling.type = Console +appender.rolling.name = rolling +appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = server rootLogger.level = info -rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.rolling.ref = rolling + 
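# Editor's note (illustration only, not part of the patch): with the
# ESJsonLayout console appenders configured in this file, each line Docker
# captures on stdout is a single JSON document. The field set below comes from
# the commit message (type, timestamp, level, component, cluster.name,
# node.name, plus node.id and cluster.uuid once a cluster state has been
# observed); the values shown are hypothetical:
#
# {"type": "server", "timestamp": "2019-01-29T07:20:09,123+0100", "level": "INFO",
#  "component": "o.e.n.Node", "cluster.name": "docker-cluster",
#  "node.name": "node-0", "message": "started"}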
+appender.deprecation_rolling.type = Console +appender.deprecation_rolling.name = deprecation_rolling +appender.deprecation_rolling.layout.type = ESJsonLayout +appender.deprecation_rolling.layout.type_name = deprecation + +logger.deprecation.name = org.elasticsearch.deprecation +logger.deprecation.level = warn +logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.additivity = false + +appender.index_search_slowlog_rolling.type = Console +appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling +appender.index_search_slowlog_rolling.layout.type = ESJsonLayout +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog + +logger.index_search_slowlog_rolling.name = index.search.slowlog +logger.index_search_slowlog_rolling.level = trace +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.additivity = false + +appender.index_indexing_slowlog_rolling.type = Console +appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling +appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout +appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog + +logger.index_indexing_slowlog.name = index.indexing.slowlog.index +logger.index_indexing_slowlog.level = trace +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.additivity = false diff --git a/distribution/src/config/log4j2.properties b/distribution/src/config/log4j2.properties index 6de21cd48f67b..45bf720902c1c 100644 --- a/distribution/src/config/log4j2.properties +++ b/distribution/src/config/log4j2.properties @@ -9,12 +9,14 @@ appender.console.name = console appender.console.layout.type = PatternLayout appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +######## Server JSON ############################ appender.rolling.type = RollingFile appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log -appender.rolling.layout.type = PatternLayout -appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json +appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = server + +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 @@ -29,58 +31,144 @@ appender.rolling.strategy.action.condition.type = IfFileName appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB +################################################ +######## Server - old style pattern ########### +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling_old.policies.type = Policies +appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling_old.policies.time.interval = 1 +appender.rolling_old.policies.time.modulate = true +appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling_old.policies.size.size = 128MB +appender.rolling_old.strategy.type = DefaultRolloverStrategy +appender.rolling_old.strategy.fileIndex = nomax +appender.rolling_old.strategy.action.type = Delete +appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling_old.strategy.action.condition.type = IfFileName +appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB +################################################ rootLogger.level = info rootLogger.appenderRef.console.ref = console rootLogger.appenderRef.rolling.ref = rolling +rootLogger.appenderRef.rolling_old.ref = rolling_old +######## Deprecation JSON ####################### appender.deprecation_rolling.type = RollingFile appender.deprecation_rolling.name = deprecation_rolling -appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log -appender.deprecation_rolling.layout.type = PatternLayout -appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n -appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz +appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json +appender.deprecation_rolling.layout.type = ESJsonLayout +appender.deprecation_rolling.layout.type_name = deprecation + +appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz appender.deprecation_rolling.policies.type = Policies appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.deprecation_rolling.policies.size.size = 1GB appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy appender.deprecation_rolling.strategy.max = 4 +################################################# +######## Deprecation - old style pattern ####### +appender.deprecation_rolling_old.type = RollingFile +appender.deprecation_rolling_old.name = deprecation_rolling_old +appender.deprecation_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log +appender.deprecation_rolling_old.layout.type = PatternLayout +appender.deprecation_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.deprecation_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _deprecation-%i.log.gz +appender.deprecation_rolling_old.policies.type = Policies +appender.deprecation_rolling_old.policies.size.type = 
SizeBasedTriggeringPolicy +appender.deprecation_rolling_old.policies.size.size = 1GB +appender.deprecation_rolling_old.strategy.type = DefaultRolloverStrategy +appender.deprecation_rolling_old.strategy.max = 4 +################################################# logger.deprecation.name = org.elasticsearch.deprecation logger.deprecation.level = warn logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling +logger.deprecation.appenderRef.deprecation_rolling_old.ref = deprecation_rolling_old logger.deprecation.additivity = false +######## Search slowlog JSON #################### appender.index_search_slowlog_rolling.type = RollingFile appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling -appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log -appender.index_search_slowlog_rolling.layout.type = PatternLayout -appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n -appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz +appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog.json +appender.index_search_slowlog_rolling.layout.type = ESJsonLayout +appender.index_search_slowlog_rolling.layout.type_name = index_search_slowlog + +appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\ + .cluster_name}_index_search_slowlog-%i.json.gz appender.index_search_slowlog_rolling.policies.type = Policies appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_search_slowlog_rolling.policies.size.size = 1GB appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy appender.index_search_slowlog_rolling.strategy.max = 4 +################################################# +######## Search slowlog - old style pattern #### +appender.index_search_slowlog_rolling_old.type = RollingFile +appender.index_search_slowlog_rolling_old.name = index_search_slowlog_rolling_old +appender.index_search_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_search_slowlog.log +appender.index_search_slowlog_rolling_old.layout.type = PatternLayout +appender.index_search_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.index_search_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_search_slowlog-%i.log.gz +appender.index_search_slowlog_rolling_old.policies.type = Policies +appender.index_search_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.index_search_slowlog_rolling_old.policies.size.size = 1GB +appender.index_search_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.index_search_slowlog_rolling_old.strategy.max = 4 +################################################# logger.index_search_slowlog_rolling.name = index.search.slowlog logger.index_search_slowlog_rolling.level = trace logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling +logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling_old.ref = index_search_slowlog_rolling_old 
logger.index_search_slowlog_rolling.additivity = false +######## Indexing slowlog JSON ################## appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling -appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log -appender.index_indexing_slowlog_rolling.layout.type = PatternLayout -appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n -appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz +appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog.json +appender.index_indexing_slowlog_rolling.layout.type = ESJsonLayout +appender.index_indexing_slowlog_rolling.layout.type_name = index_indexing_slowlog + +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog-%i.json.gz appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling.policies.size.size = 1GB appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy appender.index_indexing_slowlog_rolling.strategy.max = 4 +################################################# +######## Indexing slowlog - old style pattern ## +appender.index_indexing_slowlog_rolling_old.type = RollingFile +appender.index_indexing_slowlog_rolling_old.name = index_indexing_slowlog_rolling_old +appender.index_indexing_slowlog_rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog.log +appender.index_indexing_slowlog_rolling_old.layout.type = PatternLayout +appender.index_indexing_slowlog_rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n + +appender.index_indexing_slowlog_rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\ + _index_indexing_slowlog-%i.log.gz +appender.index_indexing_slowlog_rolling_old.policies.type = Policies +appender.index_indexing_slowlog_rolling_old.policies.size.type = SizeBasedTriggeringPolicy +appender.index_indexing_slowlog_rolling_old.policies.size.size = 1GB +appender.index_indexing_slowlog_rolling_old.strategy.type = DefaultRolloverStrategy +appender.index_indexing_slowlog_rolling_old.strategy.max = 4 +################################################# logger.index_indexing_slowlog.name = index.indexing.slowlog.index logger.index_indexing_slowlog.level = trace logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling +logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling_old.ref = index_indexing_slowlog_rolling_old logger.index_indexing_slowlog.additivity = false diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc index 9f99604318aa9..313fdfdfafbe5 100644 --- a/docs/reference/migration/migrate_7_0.asciidoc +++ b/docs/reference/migration/migrate_7_0.asciidoc @@ -25,6 +25,7 @@ See also <> and <>. 
* <> * <> * <> +* <> [float] === Indices created before 7.0 @@ -58,3 +59,4 @@ include::migrate_7_0/scripting.asciidoc[] include::migrate_7_0/snapshotstats.asciidoc[] include::migrate_7_0/restclient.asciidoc[] include::migrate_7_0/low_level_restclient.asciidoc[] +include::migrate_7_0/logging.asciidoc[] diff --git a/docs/reference/migration/migrate_7_0/logging.asciidoc b/docs/reference/migration/migrate_7_0/logging.asciidoc new file mode 100644 index 0000000000000..0385397b31619 --- /dev/null +++ b/docs/reference/migration/migrate_7_0/logging.asciidoc @@ -0,0 +1,33 @@ +[float] +[[breaking_70_logging_changes]] +=== Logging changes + +[float] +==== New JSON format log files in `log` directory + +Elasticsearch will now produce additional log files in JSON format. They will be stored in files with a `*.json` suffix. +The following files should now be expected in the log directory: +* ${cluster_name}_server.json +* ${cluster_name}_deprecation.json +* ${cluster_name}_index_search_slowlog.json +* ${cluster_name}_index_indexing_slowlog.json +* ${cluster_name}.log +* ${cluster_name}_deprecation.log +* ${cluster_name}_index_search_slowlog.log +* ${cluster_name}_index_indexing_slowlog.log +* ${cluster_name}_audit.json +* gc.log + +Note: You can configure which of these files are written by editing `log4j2.properties`. + +[float] +==== Log files ending with `*.log` deprecated +Log files with the `.log` file extension using the old pattern layout format +are now considered deprecated and the newly added JSON log file format with +the `.json` file extension should be used instead. +Note: GC logs, which are written to the file `gc.log`, will not be changed. + +[float] +==== Docker output in JSON format + +All Docker console logs are now in JSON format. You can distinguish log streams with the `type` field. diff --git a/docs/reference/setup/logging-config.asciidoc b/docs/reference/setup/logging-config.asciidoc index f477a14bb6d3d..dcea83a7f5d67 100644 --- a/docs/reference/setup/logging-config.asciidoc +++ b/docs/reference/setup/logging-config.asciidoc @@ -20,43 +20,62 @@ will resolve to `/var/log/elasticsearch/production.log`. 
[source,properties] -------------------------------------------------- +######## Server JSON ############################ appender.rolling.type = RollingFile <1> appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2> -appender.rolling.layout.type = PatternLayout -appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <3> +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json <2> +appender.rolling.layout.type = ESJsonLayout <3> +appender.rolling.layout.type_name = server <4> +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz <5> appender.rolling.policies.type = Policies -appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4> -appender.rolling.policies.time.interval = 1 <5> -appender.rolling.policies.time.modulate = true <6> -appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <7> -appender.rolling.policies.size.size = 256MB <8> +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <6> +appender.rolling.policies.time.interval = 1 <7> +appender.rolling.policies.time.modulate = true <8> +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy <9> +appender.rolling.policies.size.size = 256MB <10> appender.rolling.strategy.type = DefaultRolloverStrategy appender.rolling.strategy.fileIndex = nomax -appender.rolling.strategy.action.type = Delete <9> +appender.rolling.strategy.action.type = Delete <11> appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} -appender.rolling.strategy.action.condition.type = IfFileName <10> -appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <11> -appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <12> -appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <13> +appender.rolling.strategy.action.condition.type = IfFileName <12> +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* <13> +appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize <14> +appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB <15> +################################################ -------------------------------------------------- <1> Configure the `RollingFile` appender -<2> Log to `/var/log/elasticsearch/production.log` -<3> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.log`; logs +<2> Log to `/var/log/elasticsearch/production.json` +<3> Use JSON layout. +<4> `type_name` is a flag populating the `type` field in a `ESJsonLayout`. + It can be used to distinguish different types of logs more easily when parsing them. 
+<5> Roll logs to `/var/log/elasticsearch/production-yyyy-MM-dd-i.json`; logs will be compressed on each roll and `i` will be incremented -<4> Use a time-based roll policy -<5> Roll logs on a daily basis -<6> Align rolls on the day boundary (as opposed to rolling every twenty-four +<6> Use a time-based roll policy +<7> Roll logs on a daily basis +<8> Align rolls on the day boundary (as opposed to rolling every twenty-four hours) -<7> Using a size-based roll policy -<8> Roll logs after 256 MB -<9> Use a delete action when rolling logs -<10> Only delete logs matching a file pattern -<11> The pattern is to only delete the main logs -<12> Only delete if we have accumulated too many compressed logs -<13> The size condition on the compressed logs is 2 GB +<9> Using a size-based roll policy +<10> Roll logs after 256 MB +<11> Use a delete action when rolling logs +<12> Only delete logs matching a file pattern +<13> The pattern is to only delete the main logs +<14> Only delete if we have accumulated too many compressed logs +<15> The size condition on the compressed logs is 2 GB + +[source,properties] +-------------------------------------------------- +######## Server - old style pattern ########### +appender.rolling_old.type = RollingFile +appender.rolling_old.name = rolling_old +appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log <1> +appender.rolling_old.layout.type = PatternLayout +appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n +appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.old_log.gz + +-------------------------------------------------- +<1> The configuration for `old style` pattern appenders. These logs will be saved in `*.log` files and if archived will be in `* +.log.gz` files. Note that these should be considered deprecated and will be removed in the future. NOTE: Log4j's configuration parsing gets confused by any extraneous whitespace; if you copy and paste any Log4j settings on this page, or enter any Log4j @@ -194,3 +213,38 @@ files (four rolled logs, and the active log). You can disable it in the `config/log4j2.properties` file by setting the deprecation log level to `error`. + + +[float] +[[json-logging]] +=== JSON log format + +To make parsing Elasticsearch logs easier, logs are now printed in a JSON format. +This is configured by a Log4J layout property `appender.rolling.layout.type = ESJsonLayout`. +This layout requires a `type_name` attribute to be set which is used to distinguish +logs streams when parsing. +[source,properties] +-------------------------------------------------- +appender.rolling.layout.type = ESJsonLayout +appender.rolling.layout.type_name = server +-------------------------------------------------- +:es-json-layout-java-doc: {elasticsearch-javadoc}/org/elasticsearch/common/logging/ESJsonLayout.html + +Each line contains a single JSON document with the properties configured in `ESJsonLayout`. +See this class {es-json-layout-java-doc}[javadoc] for more details. +However if a JSON document contains an exception, it will be printed over multiple lines. +The first line will contain regular properties and subsequent lines will contain the +stacktrace formatted as a JSON array. + + +NOTE: You can still use your own custom layout. To do that replace the line +`appender.rolling.layout.type` with a different layout. 
See sample below: +[source,properties] +-------------------------------------------------- +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +-------------------------------------------------- \ No newline at end of file diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index 26d567ca3ef6c..3b2e21fd557e7 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -28,7 +28,7 @@ integTestRunner { systemProperty 'tests.security.manager', 'false' systemProperty 'tests.system_call_filter', 'false' systemProperty 'pidfile', "${-> integTest.getNodes().get(0).pidFile}" - systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}.log" + systemProperty 'log', "${-> integTest.getNodes().get(0).homeDir}/logs/${-> integTest.getNodes().get(0).clusterName}_server.json" systemProperty 'runtime.java.home', "${project.runtimeJavaHome}" } diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 9250122025c0a..16398b380cfe1 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,10 +21,14 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; +import org.elasticsearch.cli.Terminal; import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.JsonLogLine; +import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import java.io.BufferedReader; import java.io.IOException; @@ -34,10 +38,12 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.List; +import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -53,7 +59,7 @@ public void testDieWithDignity() throws Exception { final int pid = Integer.parseInt(pidFileLines.get(0)); Files.delete(pidFile); IOException e = expectThrows(IOException.class, - () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); + () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); Matcher failureMatcher = instanceOf(ConnectionClosedException.class); if (Constants.WINDOWS) { /* @@ -64,9 +70,9 @@ public void testDieWithDignity() throws Exception { * https://issues.apache.org/jira/browse/HTTPASYNC-134 * * So we catch it here and consider it "ok". 
- */ + */ failureMatcher = either(failureMatcher) - .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); + .or(hasToString(containsString("An existing connection was forcibly closed by the remote host"))); } assertThat(e, failureMatcher); @@ -85,28 +91,62 @@ public void testDieWithDignity() throws Exception { } }); - // parse the logs and ensure that Elasticsearch died with the expected cause - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); + try { + // parse the logs and ensure that Elasticsearch died with the expected cause + Path path = PathUtils.get(System.getProperty("log")); + try (Stream stream = JsonLogsStream.from(path)) { + final Iterator it = stream.iterator(); - final Iterator it = lines.iterator(); + boolean fatalError = false; + boolean fatalErrorInThreadExiting = false; - boolean fatalError = false; - boolean fatalErrorInThreadExiting = false; + while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { + final JsonLogLine line = it.next(); + if (isFatalError(line)) { + fatalError = true; + } else if (isFatalErrorInThreadExiting(line) || isWarnExceptionReceived(line)) { + fatalErrorInThreadExiting = true; + assertThat(line.stacktrace(), + hasItem(Matchers.containsString("java.lang.OutOfMemoryError: die with dignity"))); + } + } - while (it.hasNext() && (fatalError == false || fatalErrorInThreadExiting == false)) { - final String line = it.next(); - if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.ExceptionsHelper\\s*\\] \\[node-0\\] fatal error")) { - fatalError = true; - } else if (line.matches(".*\\[ERROR\\]\\[o\\.e\\.b\\.ElasticsearchUncaughtExceptionHandler\\] \\[node-0\\]" - + " fatal error in thread \\[Thread-\\d+\\], exiting$")) { - fatalErrorInThreadExiting = true; - assertTrue(it.hasNext()); - assertThat(it.next(), equalTo("java.lang.OutOfMemoryError: die with dignity")); + assertTrue(fatalError); + assertTrue(fatalErrorInThreadExiting); } + } catch (AssertionError ae) { + Path path = PathUtils.get(System.getProperty("log")); + debugLogs(path); + throw ae; } + } + + private boolean isWarnExceptionReceived(JsonLogLine line) { + return line.level().equals("WARN") + && line.component().equals("o.e.h.AbstractHttpServerTransport") + && line.nodeName().equals("node-0") + && line.message().contains("caught exception while handling client http traffic"); + } + + private void debugLogs(Path path) throws IOException { + try (BufferedReader reader = Files.newBufferedReader(path)) { + Terminal terminal = Terminal.DEFAULT; + reader.lines().forEach(line -> terminal.println(line)); + } + } + + private boolean isFatalErrorInThreadExiting(JsonLogLine line) { + return line.level().equals("ERROR") + && line.component().equals("o.e.b.ElasticsearchUncaughtExceptionHandler") + && line.nodeName().equals("node-0") + && line.message().matches("fatal error in thread \\[Thread-\\d+\\], exiting$"); + } - assertTrue(fatalError); - assertTrue(fatalErrorInThreadExiting); + private boolean isFatalError(JsonLogLine line) { + return line.level().equals("ERROR") + && line.component().equals("o.e.ExceptionsHelper") + && line.nodeName().equals("node-0") + && line.message().contains("fatal error"); } @Override diff --git a/qa/logging-config/build.gradle b/qa/logging-config/build.gradle new file mode 100644 index 0000000000000..0abdc1247514a --- /dev/null +++ b/qa/logging-config/build.gradle @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.standalone-test' + +integTestCluster { + autoSetInitialMasterNodes = false + autoSetHostsProvider = false + /** + * Provide a custom log4j configuration where layout is an old style pattern and confirm that Elasticsearch + * can successfully startup. + */ + extraConfigFile 'log4j2.properties', 'custom-log4j2.properties' +} + +integTestRunner { + systemProperty 'tests.logfile', + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.log" +} + +unitTest { + systemProperty 'tests.security.manager', 'false' +} diff --git a/qa/logging-config/custom-log4j2.properties b/qa/logging-config/custom-log4j2.properties new file mode 100644 index 0000000000000..b225d7cd550cf --- /dev/null +++ b/qa/logging-config/custom-log4j2.properties @@ -0,0 +1,31 @@ + +status = error + +# log action execution errors for easier debugging +logger.action.name = org.elasticsearch.action +logger.action.level = debug + +appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern =%notEmpty{%node_name} %notEmpty{%node_and_cluster_id} %notEmpty{${sys:es.logs.cluster_name}} %m%n + +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size = 128MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.fileIndex = nomax +appender.rolling.strategy.action.type = Delete +appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path} +appender.rolling.strategy.action.condition.type = IfFileName +appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-* +appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize +appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB + +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling diff --git a/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java new file mode 100644 index 0000000000000..b484ba90a4da3 --- /dev/null +++ b/qa/logging-config/src/test/java/org/elasticsearch/common/logging/JsonLoggerTests.java @@ -0,0 +1,232 @@ +/* + * 
Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configurator; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * This test confirms JSON log structure is properly formatted and can be parsed. + * It has to be in a org.elasticsearch.common.logging package to use PrefixLogger + */ +public class JsonLoggerTests extends ESTestCase { + + @BeforeClass + public static void initNodeName() { + LogConfigurator.setNodeName("sample-name"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + LogConfigurator.registerErrorListener(); + setupLogging("json_layout"); + } + + @Override + public void tearDown() throws Exception { + LoggerContext context = (LoggerContext) LogManager.getContext(false); + Configurator.shutdown(context); + super.tearDown(); + } + + @SuppressWarnings("unchecked") + public void testJsonLayout() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + + testLogger.error("This is an error message"); + testLogger.warn("This is a warning message"); + testLogger.info("This is an info message"); + testLogger.debug("This is a debug message"); + testLogger.trace("This is a trace message"); + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { + List jsonLogs = collectLines(stream); + + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.ERROR, "sample-name", "test", "This is an error message"), + logLine("file", Level.WARN, "sample-name", "test", "This is a warning message"), + logLine("file", Level.INFO, "sample-name", "test", "This is an info message"), + logLine("file", Level.DEBUG, "sample-name", "test", "This is a debug message"), + logLine("file", Level.TRACE, "sample-name", "test", "This is a trace message") + )); + } + } + + @SuppressWarnings("unchecked") + public void testPrefixLoggerInJson() throws IOException { + Logger shardIdLogger = Loggers.getLogger("shardIdLogger", ShardId.fromString("[indexName][123]")); + shardIdLogger.info("This is an info message with a shardId"); + + 
Logger prefixLogger = new PrefixLogger(LogManager.getLogger("prefixLogger"), "PREFIX"); + prefixLogger.info("This is an info message with a prefix"); + + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { + List jsonLogs = collectLines(stream); + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.INFO, "sample-name", "shardIdLogger", "[indexName][123] This is an info message with a shardId"), + logLine("file", Level.INFO, "sample-name", "prefixLogger", "PREFIX This is an info message with a prefix") + )); + } + } + + public void testJsonInMessage() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + + testLogger.info(json); + + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { + List jsonLogs = collectLines(stream); + assertThat(jsonLogs, Matchers.contains( + logLine("file", Level.INFO, "sample-name", "test", json) + )); + } + } + + public void testStacktrace() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + testLogger.error("error message", new Exception("exception message", new RuntimeException("cause message"))); + + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { + List jsonLogs = collectLines(stream); + assertThat(jsonLogs, Matchers.contains( + Matchers.allOf( + logLine("file", Level.ERROR, "sample-name", "test", "error message"), + stacktraceWith("java.lang.Exception: exception message"), + stacktraceWith("Caused by: java.lang.RuntimeException: cause message") + ) + )); + } + } + + public void testJsonInStacktraceMessageIsSplitted() throws IOException { + final Logger testLogger = LogManager.getLogger("test"); + + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + testLogger.error("error message " + json, new Exception(json)); + + final Path path = clusterLogsPath(); + try (Stream stream = JsonLogsStream.from(path)) { + List jsonLogs = collectLines(stream); + + assertThat(jsonLogs, Matchers.contains( + Matchers.allOf( + //message field will have a single line with json escaped + logLine("file", Level.ERROR, "sample-name", "test", "error message " + json), + + //stacktrace field will have each json line will in a separate array element + stacktraceWith(("java.lang.Exception: " + json).split("\n")) + ) + )); + } + } + + private List collectLines(Stream stream) { + return stream + .skip(1)//skip the first line from super class + .collect(Collectors.toList()); + } + + private Path clusterLogsPath() { + return PathUtils.get(System.getProperty("es.logs.base_path"), System.getProperty("es.logs.cluster_name") + ".log"); + } + + private void setupLogging(final String config) throws IOException, UserException { + setupLogging(config, Settings.EMPTY); + } + + private void setupLogging(final String config, final Settings settings) throws IOException, UserException { + assertFalse("Environment path.home variable should not be set", Environment.PATH_HOME_SETTING.exists(settings)); + final Path configDir = getDataPath(config); + final Settings mergedSettings = Settings.builder() + .put(settings) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + // need to use custom config path 
so we can use a custom log4j2.properties file for the test + final Environment environment = new Environment(mergedSettings, configDir); + LogConfigurator.configure(environment); + } + + private Matcher logLine(String type, Level level, String nodeName, String component, String message) { + return new FeatureMatcher(Matchers.is(true), "logLine", "logLine") { + + @Override + protected Boolean featureValueOf(JsonLogLine actual) { + return actual.type().equals(type) && + actual.level().equals(level.toString()) && + actual.nodeName().equals(nodeName) && + actual.component().equals(component) && + actual.message().equals(message); + } + }; + } + + private Matcher stacktraceWith(String... lines) { + return new FeatureMatcher>(Matchers.hasItems(lines), + "stacktrace", "stacktrace") { + + @Override + protected List featureValueOf(JsonLogLine actual) { + return actual.stacktrace(); + } + }; + } +} diff --git a/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java new file mode 100644 index 0000000000000..407d23de99769 --- /dev/null +++ b/qa/logging-config/src/test/java/org/elasticsearch/qa/custom_logging/CustomLoggingConfigIT.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.qa.custom_logging; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.hamcrest.RegexMatcher; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.List; + +/** + * This test verifies that Elasticsearch can startup successfully with a custom logging config using variables introduced in + * ESJsonLayout + * The intention is to confirm that users can still run their Elasticsearch instances with previous configurations. 
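+ * <p>
+ * Because the custom {@code log4j2.properties} under test uses an old-style {@code PatternLayout}, the assertion
+ * matches plain-text log lines against the {@code NODE_STARTED} regex below instead of parsing JSON.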
+ */ +public class CustomLoggingConfigIT extends ESRestTestCase { + private static final String NODE_STARTED = ".*node-0.*cluster.uuid.*node.id.*started.*"; + + public void testSuccessfulStartupWithCustomConfig() throws Exception { + assertBusy(() -> { + List lines = readAllLines(getLogFile()); + assertThat(lines, Matchers.hasItem(RegexMatcher.matches(NODE_STARTED))); + }); + } + + private List readAllLines(Path logFile) { + return AccessController.doPrivileged((PrivilegedAction>) () -> { + try { + return Files.readAllLines(logFile, StandardCharsets.UTF_8); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }); + } + + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") + private Path getLogFile() { + String logFileString = System.getProperty("tests.logfile"); + if (logFileString == null) { + fail("tests.logfile must be set to run this test. It is automatically " + + "set by gradle. If you must set it yourself then it should be the absolute path to the " + + "log file."); + } + return Paths.get(logFileString); + } +} diff --git a/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties new file mode 100644 index 0000000000000..4bbd0b038ab8a --- /dev/null +++ b/qa/logging-config/src/test/resources/org/elasticsearch/common/logging/json_layout/log4j2.properties @@ -0,0 +1,21 @@ +appender.console.type = Console +appender.console.name = console +appender.console.layout.type = ESJsonLayout +appender.console.layout.type_name = console + +appender.file.type = File +appender.file.name = file +appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.file.layout.type = ESJsonLayout +appender.file.layout.type_name = file + + +rootLogger.level = info +rootLogger.appenderRef.console.ref = console +rootLogger.appenderRef.file.ref = file + +logger.test.name = test +logger.test.level = trace +logger.test.appenderRef.console.ref = console +logger.test.appenderRef.file.ref = file +logger.test.additivity = false diff --git a/qa/logging-config/src/test/resources/plugin-security.policy b/qa/logging-config/src/test/resources/plugin-security.policy new file mode 100644 index 0000000000000..d0d865c4ede16 --- /dev/null +++ b/qa/logging-config/src/test/resources/plugin-security.policy @@ -0,0 +1,4 @@ +grant { + // Needed to read the log file + permission java.io.FilePermission "${tests.logfile}", "read"; +}; diff --git a/qa/unconfigured-node-name/build.gradle b/qa/unconfigured-node-name/build.gradle index f8fb696e8ca85..5aba0562e03f6 100644 --- a/qa/unconfigured-node-name/build.gradle +++ b/qa/unconfigured-node-name/build.gradle @@ -30,5 +30,5 @@ integTestCluster { integTestRunner { systemProperty 'tests.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_server.json" } diff --git a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java similarity index 92% rename from qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java rename to qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java 
index 44d5bb6c900f5..50cc20b0e5789 100644 --- a/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/NodeNameInLogsIT.java +++ b/qa/unconfigured-node-name/src/test/java/org/elasticsearch/unconfigured_node_name/JsonLogsFormatAndParseIT.java @@ -19,11 +19,11 @@ package org.elasticsearch.unconfigured_node_name; -import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase; +import org.elasticsearch.common.logging.JsonLogsIntegTestCase; import org.hamcrest.Matcher; -import java.io.IOException; import java.io.BufferedReader; +import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -32,7 +32,7 @@ import static org.hamcrest.Matchers.not; -public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase { +public class JsonLogsFormatAndParseIT extends JsonLogsIntegTestCase { @Override protected Matcher nodeNameMatcher() { return not(""); diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats index 3cf495939aff9..8baa75f38f5bc 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats @@ -98,7 +98,7 @@ setup() { systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch.log" + assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json" # Converts the epoch back in a human readable format run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" diff --git a/qa/vagrant/src/test/resources/packaging/utils/utils.bash b/qa/vagrant/src/test/resources/packaging/utils/utils.bash index 92363d4d4e348..3f577668bf1ec 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/utils.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/utils.bash @@ -428,7 +428,7 @@ describe_port() { } debug_collect_logs() { - local es_logfile="$ESLOG/elasticsearch.log" + local es_logfile="$ESLOG/elasticsearch_server.json" local system_logfile='/var/log/messages' if [ -e "$es_logfile" ]; then diff --git a/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java new file mode 100644 index 0000000000000..af7cd81f202e3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/ESJsonLayout.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.Layout; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.Node; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.config.plugins.PluginAttribute; +import org.apache.logging.log4j.core.config.plugins.PluginFactory; +import org.apache.logging.log4j.core.layout.AbstractStringLayout; +import org.apache.logging.log4j.core.layout.ByteBufferDestination; +import org.apache.logging.log4j.core.layout.PatternLayout; +import org.elasticsearch.common.Strings; + +import java.nio.charset.Charset; +import java.util.Map; + +/** + * Formats log events as strings in a json format. + *
<p>
+ * The class is wrapping the {@link PatternLayout} with a pattern to format into json. This gives more flexibility and control over how the
+ * log messages are formatted in {@link org.apache.logging.log4j.core.layout.JsonLayout}
+ */
+@Plugin(name = "ESJsonLayout", category = Node.CATEGORY, elementType = Layout.ELEMENT_TYPE, printObject = true)
+public class ESJsonLayout extends AbstractStringLayout {
+    /**
+     * Fields used in a pattern to format a json log line:
+     * <ul>
+     * <li>type - the type of logs. These represent appenders and help docker distinguish log streams.</li>
+     * <li>timestamp - ISO8601 with additional timezone ID</li>
+     * <li>level - INFO, WARN etc</li>
+     * <li>component - logger name, most of the times class name</li>
+     * <li>cluster.name - taken from sys:es.logs.cluster_name system property because it is always set</li>
+     * <li>node.name - taken from NodeNamePatternConverter, as it can be set in runtime as hostname when not set in elasticsearch.yml</li>
+     * <li>node_and_cluster_id - in json as node.id and cluster.uuid - taken from NodeAndClusterIdConverter and present
+     * once clusterStateUpdate is first received</li>
+     * <li>message - a json escaped message. Multiline messages will be converted to single line with new line explicitly
+     * replaced to \n</li>
+     * <li>exceptionAsJson - in json as a stacktrace field. Only present when throwable is passed as a parameter when using a logger.
+     * Taken from JsonThrowablePatternConverter</li>
+     * </ul>
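+     * <p>
+     * As an illustration (values invented, not taken from a real run): with the layout attribute
+     * {@code type_name = server}, the {@code ${TYPE}} placeholder below is substituted so that every emitted
+     * line begins with {@code "type": "server"}.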
+ */ + private static final String PATTERN = "{" + + "\"type\": \"${TYPE}\", " + + "\"timestamp\": \"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}\", " + + "\"level\": \"%p\", " + + "\"component\": \"%c{1.}\", " + + "\"cluster.name\": \"${sys:es.logs.cluster_name}\", " + + "\"node.name\": \"%node_name\", " + + "%notEmpty{%node_and_cluster_id, } " + + "\"message\": \"%notEmpty{%enc{%marker}{JSON} }%enc{%.-10000m}{JSON}\" " + + "%exceptionAsJson " + + "}%n"; + + private final PatternLayout patternLayout; + + protected ESJsonLayout(String typeName, Charset charset) { + super(charset); + this.patternLayout = PatternLayout.newBuilder() + .withPattern(pattern(typeName)) + .withAlwaysWriteExceptions(false) + .build(); + } + + private String pattern(String type) { + if (Strings.isEmpty(type)) { + throw new IllegalArgumentException("layout parameter 'type_name' cannot be empty"); + } + return PATTERN.replace("${TYPE}", type); + } + + @PluginFactory + public static ESJsonLayout createLayout(@PluginAttribute("type_name") String type, + @PluginAttribute(value = "charset", defaultString = "UTF-8") Charset charset) { + return new ESJsonLayout(type, charset); + } + + @Override + public String toSerializable(final LogEvent event) { + return patternLayout.toSerializable(event); + } + + @Override + public Map getContentFormat() { + return patternLayout.getContentFormat(); + } + + @Override + public void encode(final LogEvent event, final ByteBufferDestination destination) { + patternLayout.encode(event, destination); + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("ESJsonLayout{"); + sb.append("patternLayout=").append(patternLayout); + sb.append('}'); + return sb.toString(); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java new file mode 100644 index 0000000000000..97e712512317b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/JsonThrowablePatternConverter.java @@ -0,0 +1,105 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache license, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the license for the specific language governing permissions and + * limitations under the license. 
+ */ +package org.elasticsearch.common.logging; + +import com.fasterxml.jackson.core.io.JsonStringEncoder; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.logging.log4j.core.pattern.ThrowablePatternConverter; +import org.apache.logging.log4j.util.Strings; + +import java.nio.charset.Charset; +import java.util.StringJoiner; + +/** + + * Outputs the Throwable portion of the LoggingEvent as a Json formatted field with array + * "exception": [ "stacktrace", "lines", "as", "array", "elements" ] + * + * Reusing @link org.apache.logging.log4j.core.pattern.ExtendedThrowablePatternConverter which already converts a Throwable from + * LoggingEvent into a multiline string + */ +@Plugin(name = "JsonThrowablePatternConverter", category = PatternConverter.CATEGORY) +@ConverterKeys({"exceptionAsJson"}) +public final class JsonThrowablePatternConverter extends ThrowablePatternConverter { + private final ExtendedThrowablePatternConverter throwablePatternConverter; + + /** + * Private as only expected to be used by log4j2 newInstance method + */ + private JsonThrowablePatternConverter(final Configuration config, final String[] options) { + super("JsonThrowablePatternConverter", "throwable", options, config); + this.throwablePatternConverter = ExtendedThrowablePatternConverter.newInstance(config, options); + } + + /** + * Gets an instance of the class. + * + * @param config The current Configuration. + * @param options pattern options, may be null. If first element is "short", + * only the first line of the throwable will be formatted. + * @return instance of class. 
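+     * <p>
+     * In a layout pattern this converter is referenced through its {@code exceptionAsJson} key (declared above via
+     * {@code @ConverterKeys}); when the event carries a throwable it appends a {@code "stacktrace"} JSON array
+     * field to the line.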
+ */ + public static JsonThrowablePatternConverter newInstance(final Configuration config, final String[] options) { + return new JsonThrowablePatternConverter(config, options); + } + + /** + * {@inheritDoc} + */ + @Override + public void format(final LogEvent event, final StringBuilder toAppendTo) { + String consoleStacktrace = formatStacktrace(event); + if (Strings.isNotEmpty(consoleStacktrace)) { + String jsonStacktrace = formatJson(consoleStacktrace); + + toAppendTo.append(", "); + toAppendTo.append(jsonStacktrace); + } + } + + private String formatStacktrace(LogEvent event) { + StringBuilder stringBuilder = new StringBuilder(); + throwablePatternConverter.format(event, stringBuilder); + return stringBuilder.toString(); + } + + private String formatJson(String consoleStacktrace) { + String lineSeparator = options.getSeparator() + "\t|" + options.getSeparator(); + String[] split = consoleStacktrace.split(lineSeparator); + + StringJoiner stringJoiner = new StringJoiner(",\n", "\n\"stacktrace\": [", "]"); + for (String line : split) { + stringJoiner.add(wrapAsJson(line)); + } + return stringJoiner.toString(); + } + + private String wrapAsJson(String line) { + byte[] bytes = JsonStringEncoder.getInstance().quoteAsUTF8(line); + return "\"" + new String(bytes, Charset.defaultCharset()) + "\""; + } + + @Override + public boolean handlesThrowable() { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java new file mode 100644 index 0000000000000..27437947870b4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdConverter.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.config.plugins.Plugin; +import org.apache.logging.log4j.core.pattern.ConverterKeys; +import org.apache.logging.log4j.core.pattern.LogEventPatternConverter; +import org.apache.logging.log4j.core.pattern.PatternConverter; +import org.apache.lucene.util.SetOnce; + +import java.util.Locale; + +/** + * Pattern converter to format the node_and_cluster_id variable into JSON fields node.id and cluster.uuid. + * Keeping those two fields together assures that they will be atomically set and become visible in logs at the same time. + */ +@Plugin(category = PatternConverter.CATEGORY, name = "NodeAndClusterIdConverter") +@ConverterKeys({"node_and_cluster_id"}) +public final class NodeAndClusterIdConverter extends LogEventPatternConverter { + private static final SetOnce nodeAndClusterId = new SetOnce<>(); + + /** + * Called by log4j2 to initialize this converter. 
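+     * <p>
+     * Log4j2 resolves this converter through the {@code node_and_cluster_id} key declared above via
+     * {@code @ConverterKeys}; the {@code options} array is part of the plugin contract but unused here.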
+ */ + public static NodeAndClusterIdConverter newInstance(@SuppressWarnings("unused") final String[] options) { + return new NodeAndClusterIdConverter(); + } + + public NodeAndClusterIdConverter() { + super("NodeAndClusterId", "node_and_cluster_id"); + } + + /** + * Updates only once the clusterID and nodeId. + * Subsequent executions will throw {@link org.apache.lucene.util.SetOnce.AlreadySetException}. + * + * @param nodeId a nodeId received from cluster state update + * @param clusterUUID a clusterId received from cluster state update + */ + public static void setNodeIdAndClusterId(String nodeId, String clusterUUID) { + nodeAndClusterId.set(formatIds(clusterUUID, nodeId)); + } + + /** + * Formats the node.id and cluster.uuid into json fields. + * + * @param event - a log event is ignored in this method as it uses the nodeId and clusterId to format + */ + @Override + public void format(LogEvent event, StringBuilder toAppendTo) { + if (nodeAndClusterId.get() != null) { + toAppendTo.append(nodeAndClusterId.get()); + } + // nodeId/clusterUuid not received yet, not appending + } + + private static String formatIds(String clusterUUID, String nodeId) { + return String.format(Locale.ROOT, "\"cluster.uuid\": \"%s\", \"node.id\": \"%s\"", clusterUUID, nodeId); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java new file mode 100644 index 0000000000000..e8f636238447a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/logging/NodeAndClusterIdStateListener.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +/** + * The {@link NodeAndClusterIdStateListener} listens to cluster state changes and ONLY when receives the first update + * it sets the clusterUUID and nodeID in log4j pattern converter {@link NodeAndClusterIdConverter}. + * Once the first update is received, it will automatically be de-registered from subsequent updates. 
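+ * <p>
+ * As wired up in this change, {@code Node#start()} invokes
+ * {@code NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(clusterService, threadContext)} shortly after
+ * the transport service starts accepting requests (see the {@code Node} diff below).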
+ */ +public class NodeAndClusterIdStateListener implements ClusterStateObserver.Listener { + private static final Logger logger = LogManager.getLogger(NodeAndClusterIdStateListener.class); + + private NodeAndClusterIdStateListener() {} + + /** + * Subscribes for the first cluster state update where nodeId and clusterId is present + * and sets these values in {@link NodeAndClusterIdConverter}. + */ + public static void getAndSetNodeIdAndClusterId(ClusterService clusterService, ThreadContext threadContext) { + ClusterState clusterState = clusterService.state(); + ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, threadContext); + + observer.waitForNextChange(new NodeAndClusterIdStateListener(), NodeAndClusterIdStateListener::isNodeAndClusterIdPresent); + } + + private static boolean isNodeAndClusterIdPresent(ClusterState clusterState) { + return getNodeId(clusterState) != null && getClusterUUID(clusterState) != null; + } + + private static String getClusterUUID(ClusterState state) { + return state.getMetaData().clusterUUID(); + } + + private static String getNodeId(ClusterState state) { + return state.getNodes().getLocalNodeId(); + } + + @Override + public void onNewClusterState(ClusterState state) { + String nodeId = getNodeId(state); + String clusterUUID = getClusterUUID(state); + + logger.debug("Received cluster state update. Setting nodeId=[{}] and clusterUuid=[{}]", nodeId, clusterUUID); + NodeAndClusterIdConverter.setNodeIdAndClusterId(nodeId, clusterUUID); + } + + @Override + public void onClusterServiceClose() {} + + @Override + public void onTimeout(TimeValue timeout) {} +} diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index dfc538c23260f..0ea5f1e78cf8b 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -67,6 +67,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.NodeAndClusterIdStateListener; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; @@ -683,10 +684,15 @@ public Node start() throws NodeValidationException { transportService.acceptIncomingRequests(); discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings); + NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId(clusterService, + injector.getInstance(ThreadPool.class).getThreadContext()); + if (initialStateTimeout.millis() > 0) { final ThreadPool thread = injector.getInstance(ThreadPool.class); ClusterState clusterState = clusterService.state(); - ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + ClusterStateObserver observer = + new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); + if (clusterState.nodes().getMasterNodeId() == null) { logger.debug("waiting to join the cluster. 
timeout [{}]", initialStateTimeout); final CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java new file mode 100644 index 0000000000000..d72b598f02865 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/logging/JsonThrowablePatternConverterTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.impl.Log4jLogEvent; +import org.apache.logging.log4j.message.SimpleMessage; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.StringReader; + +import static org.hamcrest.Matchers.equalTo; + +public class JsonThrowablePatternConverterTests extends ESTestCase { + JsonThrowablePatternConverter converter = JsonThrowablePatternConverter.newInstance(null, null); + + public void testNoStacktrace() throws IOException { + LogEvent event = Log4jLogEvent.newBuilder() + .build(); + String result = format(event); + + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + assertThat(jsonLogLine.stacktrace(), Matchers.nullValue()); + } + + public void testStacktraceWithJson() throws IOException { + LogManager.getLogger().info("asdf"); + + String json = "{\n" + + " \"terms\" : {\n" + + " \"user\" : [\n" + + " \"u1\",\n" + + " \"u2\",\n" + + " \"u3\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}"; + Exception thrown = new Exception(json); + LogEvent event = Log4jLogEvent.newBuilder() + .setMessage(new SimpleMessage("message")) + .setThrown(thrown) + .build(); + + String result = format(event); + + //confirms exception is correctly parsed + + JsonLogLine jsonLogLine = JsonLogsStream.from(new BufferedReader(new StringReader(result))) + .findFirst() + .orElseThrow(() -> new AssertionError("no logs parsed")); + + int jsonLength = json.split("\n").length; + int stacktraceLength = thrown.getStackTrace().length; + assertThat("stacktrace should formatted in multiple lines", + jsonLogLine.stacktrace().size(), equalTo(jsonLength + stacktraceLength)); + } + + private String format(LogEvent event) { + StringBuilder builder = new StringBuilder(); + converter.format(event, builder); + String jsonStacktraceElement = builder.toString(); + + return "{\"type\": \"console\", \"timestamp\": \"2019-01-03T16:30:53,058+0100\", \"level\": \"DEBUG\", " + + "\"component\": 
\"o.e.a.s.TransportSearchAction\", \"cluster.name\": \"clustername\", \"node.name\": \"node-0\", " + + "\"cluster.uuid\": \"OG5MkvOrR9azuClJhWvy6Q\", \"node.id\": \"VTShUqmcQG6SzeKY5nn7qA\", \"message\": \"msg msg\" " + + jsonStacktraceElement + "}"; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java new file mode 100644 index 0000000000000..fa8f3d7d27018 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogLine.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; + +import java.util.List; + + +/** + * Represents a single log line in a json format. + * Parsing log lines with this class confirms the json format of logs + */ +public class JsonLogLine { + public static final ObjectParser PARSER = createParser(false); + + private String type; + private String timestamp; + private String level; + private String component; + private String clusterName; + private String nodeName; + private String clusterUuid; + private String nodeId; + private String message; + private List stacktrace; + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder("JsonLogLine{"); + sb.append("type='").append(type).append('\''); + sb.append(", timestamp='").append(timestamp).append('\''); + sb.append(", level='").append(level).append('\''); + sb.append(", component='").append(component).append('\''); + sb.append(", clusterName='").append(clusterName).append('\''); + sb.append(", nodeName='").append(nodeName).append('\''); + sb.append(", clusterUuid='").append(clusterUuid).append('\''); + sb.append(", nodeId='").append(nodeId).append('\''); + sb.append(", message='").append(message).append('\''); + sb.append(", stacktrace=").append(stacktrace); + sb.append('}'); + return sb.toString(); + } + + public String type() { + return type; + } + + public String timestamp() { + return timestamp; + } + + public String level() { + return level; + } + + public String component() { + return component; + } + + public String clusterName() { + return clusterName; + } + + public String nodeName() { + return nodeName; + } + + public String clusterUuid() { + return clusterUuid; + } + + public String nodeId() { + return nodeId; + } + + public String message() { + return message; + } + + public List stacktrace() { + return stacktrace; + } + + public void setType(String type) { + this.type = type; + } + + public void setTimestamp(String timestamp) { + this.timestamp = timestamp; + } + + public void setLevel(String level) { + this.level = level; + 
} + + public void setComponent(String component) { + this.component = component; + } + + public void setClusterName(String clusterName) { + this.clusterName = clusterName; + } + + public void setNodeName(String nodeName) { + this.nodeName = nodeName; + } + + public void setClusterUuid(String clusterUuid) { + this.clusterUuid = clusterUuid; + } + + public void setNodeId(String nodeId) { + this.nodeId = nodeId; + } + + public void setMessage(String message) { + this.message = message; + } + + public void setStacktrace(List stacktrace) { + this.stacktrace = stacktrace; + } + + private static ObjectParser createParser(boolean ignoreUnknownFields) { + ObjectParser parser = new ObjectParser<>("search_template", ignoreUnknownFields, JsonLogLine::new); + parser.declareString(JsonLogLine::setType, new ParseField("type")); + parser.declareString(JsonLogLine::setTimestamp, new ParseField("timestamp")); + parser.declareString(JsonLogLine::setLevel, new ParseField("level")); + parser.declareString(JsonLogLine::setComponent, new ParseField("component")); + parser.declareString(JsonLogLine::setClusterName, new ParseField("cluster.name")); + parser.declareString(JsonLogLine::setNodeName, new ParseField("node.name")); + parser.declareString(JsonLogLine::setClusterUuid, new ParseField("cluster.uuid")); + parser.declareString(JsonLogLine::setNodeId, new ParseField("node.id")); + parser.declareString(JsonLogLine::setMessage, new ParseField("message")); + parser.declareStringArray(JsonLogLine::setStacktrace, new ParseField("stacktrace")); + + return parser; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java new file mode 100644 index 0000000000000..d9ba80d6b35de --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsIntegTestCase.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.BufferedReader; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Iterator; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isEmptyOrNullString; +import static org.hamcrest.Matchers.not; + +/** + * Tests that extend this class verify that all json layout fields appear in the first few log lines after startup + * Fields available upon process startup: type, timestamp, level, component, + * message, node.name, cluster.name. 
+ * Whereas node.id and cluster.uuid are available later once the first clusterState has been received. + * + * + * node.name, cluster.name, node.id, cluster.uuid + * should not change across all log lines + * + * Note that this won't pass for nodes in clusters that don't have the node name defined in elasticsearch.yml and start + * with DEBUG or TRACE level logging. Those nodes log a few lines before the node.name is set by LogConfigurator.setNodeName. + */ +public abstract class JsonLogsIntegTestCase extends ESRestTestCase { + /** + * Number of lines in the log file to check for the node.name, node.id or cluster.uuid. We don't + * just check the entire log file because it could be quite long + */ + private static final int LINES_TO_CHECK = 10; + + /** + * The node name to expect in the log file. + */ + protected abstract org.hamcrest.Matcher nodeNameMatcher(); + + /** + * Open the log file. This is delegated to subclasses because the test + * framework doesn't have permission to read from the log file but + * subclasses can grant themselves that permission. + */ + protected abstract BufferedReader openReader(Path logFile); + + public void testElementsPresentOnAllLinesOfLog() throws IOException { + JsonLogLine firstLine = findFirstLine(); + assertNotNull(firstLine); + + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + stream.limit(LINES_TO_CHECK) + .forEach(jsonLogLine -> { + assertThat(jsonLogLine.type(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.timestamp(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.level(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.component(), not(isEmptyOrNullString())); + assertThat(jsonLogLine.message(), not(isEmptyOrNullString())); + + // all lines should have the same nodeName and clusterName + assertThat(jsonLogLine.nodeName(), nodeNameMatcher()); + assertThat(jsonLogLine.clusterName(), equalTo(firstLine.clusterName())); + }); + } + } + + private JsonLogLine findFirstLine() throws IOException { + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + return stream.findFirst() + .orElseThrow(() -> new AssertionError("no logs at all?!")); + } + } + + public void testNodeIdAndClusterIdConsistentOnceAvailable() throws IOException { + try (Stream stream = JsonLogsStream.from(openReader(getLogFile()))) { + Iterator iterator = stream.iterator(); + + JsonLogLine firstLine = null; + while (iterator.hasNext()) { + JsonLogLine jsonLogLine = iterator.next(); + if (jsonLogLine.nodeId() != null) { + firstLine = jsonLogLine; + } + } + assertNotNull(firstLine); + + //once the nodeId and clusterId are received, they should be the same on remaining lines + + int i = 0; + while (iterator.hasNext() && i++ < LINES_TO_CHECK) { + JsonLogLine jsonLogLine = iterator.next(); + assertThat(jsonLogLine.nodeId(), equalTo(firstLine.nodeId())); + assertThat(jsonLogLine.clusterUuid(), equalTo(firstLine.clusterUuid())); + } + } + } + + @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") + private Path getLogFile() { + String logFileString = System.getProperty("tests.logfile"); + if (logFileString == null) { + fail("tests.logfile must be set to run this test. It is automatically " + + "set by gradle. 
If you must set it yourself then it should be the absolute path to the " + + "log file."); + } + return Paths.get(logFileString); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java new file mode 100644 index 0000000000000..28ad649f55a79 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/logging/JsonLogsStream.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.logging; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Iterator; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +/** + * Returns a stream of json log lines. 
+ * This is intended to be used for easy and readable assertions for logger tests + */ +public class JsonLogsStream { + private final XContentParser parser; + private final BufferedReader reader; + + private JsonLogsStream(BufferedReader reader) throws IOException { + this.reader = reader; + this.parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + reader); + } + + public static Stream from(BufferedReader reader) throws IOException { + return new JsonLogsStream(reader).stream(); + } + + public static Stream from(Path path) throws IOException { + return from(Files.newBufferedReader(path)); + } + + private Stream stream() { + Spliterator spliterator = Spliterators.spliteratorUnknownSize(new JsonIterator(), Spliterator.ORDERED); + return StreamSupport.stream(spliterator, false) + .onClose(this::close); + } + + private void close() { + try { + parser.close(); + reader.close(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private class JsonIterator implements Iterator { + + @Override + public boolean hasNext() { + return parser.isClosed() == false; + } + + @Override + public JsonLogLine next() { + JsonLogLine apply = JsonLogLine.PARSER.apply(parser, null); + nextToken(); + return apply; + } + + private void nextToken() { + try { + parser.nextToken(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java deleted file mode 100644 index a8a142096e3dd..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/common/logging/NodeNameInLogsIntegTestCase.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.logging; - -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.test.rest.ESRestTestCase; - -import java.io.BufferedReader; -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -import static org.hamcrest.Matchers.containsString; - -/** - * Tests that extend this class verify that the node name appears in the first - * few log lines on startup. Note that this won't pass for clusters that don't - * the node name defined in elasticsearch.yml and start with - * DEBUG or TRACE level logging. Those nodes log a few lines before they - * resolve the node name. - */ -public abstract class NodeNameInLogsIntegTestCase extends ESRestTestCase { - /** - * Number of lines in the log file to check for the node name. 
We don't - * just check the entire log file because it could be quite long and - * exceptions don't include the node name. - */ - private static final int LINES_TO_CHECK = 10; - - /** - * The node name to expect in the logs file. - */ - protected abstract org.hamcrest.Matcher nodeNameMatcher(); - - /** - * Open the log file. This is delegated to subclasses because the test - * framework doesn't have permission to read from the log file but - * subclasses can grant themselves that permission. - */ - protected abstract BufferedReader openReader(Path logFile); - - public void testNodeNameIsOnAllLinesOfLog() throws IOException { - BufferedReader logReader = openReader(getLogFile()); - try { - String line = logReader.readLine(); - assertNotNull("no logs at all?!", line); - Matcher m = Pattern.compile("\\] \\[([^\\]]+)\\] ").matcher(line); - if (false == m.find()) { - fail("Didn't see the node name in [" + line + "]"); - } - String nodeName = m.group(1); - - assertThat(nodeName, nodeNameMatcher()); - - int lineNumber = 1; - while (true) { - if (lineNumber < LINES_TO_CHECK) { - break; - } - line = logReader.readLine(); - if (line == null) { - break; // eof - } - lineNumber++; - assertThat(line, containsString("] [" + nodeName + "] ")); - } - } finally { - logReader.close(); - } - } - - @SuppressForbidden(reason = "PathUtils doesn't have permission to read this file") - private Path getLogFile() { - String logFileString = System.getProperty("tests.logfile"); - if (null == logFileString) { - fail("tests.logfile must be set to run this test. It is automatically " - + "set by gradle. If you must set it yourself then it should be the absolute path to the " - + "log file."); - } - return Paths.get(logFileString); - } -} diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle index 9147d5251b5be..bba9709087a56 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle +++ b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/build.gradle @@ -32,7 +32,7 @@ task writeJavaPolicy { javaPolicy.write( [ "grant {", - " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log\", \"read\";", + " permission java.io.FilePermission \"${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}_server.json\", \"read\";", "};" ].join("\n")) } @@ -54,7 +54,8 @@ followClusterTestRunner { systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" systemProperty 'tests.target_cluster', 'follow' systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" - systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/${-> followClusterTest.getNodes().get(0).clusterName}.log" + systemProperty 'log', "${-> followClusterTest.getNodes().get(0).homeDir}/logs/" + + "${-> followClusterTest.getNodes().get(0).clusterName}_server.json" finalizedBy 'leaderClusterTestCluster#stop' } diff --git a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index b612d8822437e..8fb305ba06ee6 100644 --- a/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ 
b/x-pack/plugin/ccr/qa/downgrade-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -11,13 +11,17 @@ import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.logging.JsonLogLine; +import org.elasticsearch.common.logging.JsonLogsStream; import org.elasticsearch.common.settings.Settings; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; import java.io.IOException; -import java.nio.file.Files; -import java.util.Iterator; -import java.util.List; +import java.nio.file.Path; import java.util.Map; +import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.ObjectPath.eval; import static org.hamcrest.Matchers.containsString; @@ -80,25 +84,10 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { // (does not work on windows...) if (Constants.WINDOWS == false) { assertBusy(() -> { - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - final Iterator it = lines.iterator(); - boolean warn = false; - while (it.hasNext()) { - final String line = it.next(); - if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + - "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) { - warn = true; - break; - } + Path path = PathUtils.get(System.getProperty("log")); + try (Stream stream = JsonLogsStream.from(path)) { + assertTrue(stream.anyMatch(autoFollowCoordinatorWarn()::matches)); } - assertTrue(warn); - assertTrue(it.hasNext()); - final String lineAfterWarn = it.next(); - assertThat( - lineAfterWarn, - equalTo("org.elasticsearch.ElasticsearchStatusException: " + - "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + - "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); }); } }); @@ -108,10 +97,26 @@ public void testDowngradeRemoteClusterToBasic() throws Exception { assertThat(e.getMessage(), containsString("the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); } + private Matcher autoFollowCoordinatorWarn() { + return new FeatureMatcher(Matchers.is(true), "autoFollowCoordinatorWarn", "autoFollowCoordinatorWarn") { + + @Override + protected Boolean featureValueOf(JsonLogLine actual) { + return actual.level().equals("WARN") && + actual.component().equals("o.e.x.c.a.AutoFollowCoordinator") && + actual.nodeName().equals("node-0") && + actual.message().contains("failure occurred while fetching cluster state for auto follow pattern [test_pattern]") && + actual.stacktrace().contains("org.elasticsearch.ElasticsearchStatusException: can not fetch remote cluster state " + + "as the remote cluster [leader_cluster] is not licensed for [ccr]; the license mode [BASIC]" + + " on cluster [leader_cluster] does not enable [ccr]"); + } + }; + } + private void createNewIndexAndIndexDocs(RestClient client, String index) throws IOException { Settings settings = Settings.builder() - .put("index.soft_deletes.enabled", true) - .build(); + .put("index.soft_deletes.enabled", true) + .build(); Request request = new Request("PUT", "/" + index); request.setJsonEntity("{\"settings\": " + Strings.toString(settings) + ", \"mappings\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}}"); From ebe9c9523080133d547f04d9d67c78c8d6f95fbd Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 29 Jan 
2019 10:23:50 +0200 Subject: [PATCH 44/57] [ML] Audit all errors during job deletion (#37933) This commit moves the auditing of job deletion related errors to the final listener in the job delete action. This ensures any error that occurs during job deletion is audited. --- .../xpack/ml/action/TransportDeleteJobAction.java | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 90d8c6e677a83..cc3b704d77252 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -181,7 +181,10 @@ protected void masterOperation(Task task, DeleteJobAction.Request request, Clust // The listener that will be executed at the end of the chain will notify all listeners ActionListener finalListener = ActionListener.wrap( ack -> notifyListeners(request.getJobId(), ack, null), - e -> notifyListeners(request.getJobId(), null, e) + e -> { + notifyListeners(request.getJobId(), null, e); + auditor.error(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DELETING_FAILED, e.getMessage())); + } ); ActionListener markAsDeletingListener = ActionListener.wrap( @@ -192,10 +195,7 @@ protected void masterOperation(Task task, DeleteJobAction.Request request, Clust normalDeleteJob(parentTaskClient, request, finalListener); } }, - e -> { - auditor.error(request.getJobId(), Messages.getMessage(Messages.JOB_AUDIT_DELETING_FAILED, e.getMessage())); - finalListener.onFailure(e); - }); + finalListener::onFailure); ActionListener jobExistsListener = ActionListener.wrap( response -> { From eceb3185c79b9a777e6f53e899bc61b4f10ffd42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Henrique=20Gon=C3=A7alves?= <36671768+up201608320@users.noreply.github.com> Date: Tue, 29 Jan 2019 09:06:50 +0000 Subject: [PATCH 45/57] [ML] Make GetJobStats work with arbitrary wildcards and groups (#36683) The /_ml/anomaly_detectors/{job}/_stats endpoint now works correctly when {job} is a wildcard or job group. 
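As an illustration of the fixed behaviour, a stats request over a wildcard now expands the expression to concrete job ids before tasks are matched (a hedged sketch only: the `job-*` pattern and the `restClient` handle are hypothetical, and client setup is elided):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;

    // "job-*" is a hypothetical wildcard that expands to several jobs. With the
    // per-expanded-id task matching in the change below, the call also returns
    // the live stats of every matching open job, not just an exact-id match.
    Request request = new Request("GET", "/_ml/anomaly_detectors/job-*/_stats");
    Response response = restClient.performRequest(request);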
Closes #34745 --- .../elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java | 2 +- .../xpack/ml/action/TransportGetJobsStatsActionTests.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index dd3c4618f7025..23d320143ab34 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -118,7 +118,7 @@ public boolean allowNoJobs() { @Override public boolean match(Task task) { - return OpenJobAction.JobTaskMatcher.match(task, jobId); + return expandedJobsIds.stream().anyMatch(jobId -> OpenJobAction.JobTaskMatcher.match(task, jobId)); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java index 2ee184ec877ed..eb5a5a3dda526 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsActionTests.java @@ -39,7 +39,7 @@ public void testDetermineJobIds() { result = determineJobIdsWithoutLiveStats(Arrays.asList("id1", "id2", "id3"), Collections.singletonList(new GetJobsStatsAction.Response.JobStats("id1", new DataCounts("id1"), null, null, - JobState.CLOSED, null, null, null)) + JobState.OPENED, null, null, null)) ); assertEquals(2, result.size()); assertEquals("id2", result.get(0)); From 827c4f656711a4a8897a5139be386119a33b5f69 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 29 Jan 2019 10:44:01 +0100 Subject: [PATCH 46/57] Make Version.java aware of 6.x Lucene upgrade Relates to #37913 --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 8f4d799713b09..b66630344a7ea 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -121,7 +121,7 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final int V_6_7_0_ID = 6070099; - public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); + public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version CURRENT = V_7_0_0; From 504a89feaf7c7eed55e7c44e7606fbc9ca7dae9a Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 29 Jan 2019 12:43:04 +0100 Subject: [PATCH 47/57] Step down as master when configured out of voting configuration (#37802) Abdicates to another master-eligible node once the active master is reconfigured out of the voting configuration, for example through the use of voting configuration exclusions. 
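The integration test added below exercises this end to end; its essence is the following sketch (cluster bootstrapping and the imports for the voting-exclusions action are elided, names taken from the test):

    // Record the currently elected master, then exclude it from the voting
    // configuration; with this change the master abdicates instead of staying
    // in charge of a configuration it is no longer part of.
    final String originalMaster = internalCluster().getMasterName();
    client().execute(AddVotingConfigExclusionsAction.INSTANCE,
        new AddVotingConfigExclusionsRequest(new String[]{originalMaster})).get();
    // Wait for pending cluster state updates to settle, then observe that a
    // different node has been elected master, with no restart required.
    client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get();
    assertNotEquals(originalMaster, internalCluster().getMasterName());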
Follow-up to #37712 --- .../discovery/adding-removing-nodes.asciidoc | 5 +- .../cluster/coordination/Coordinator.java | 47 ++++++++++++++++--- .../cluster/coordination/Publication.java | 16 +++++++ .../cluster/coordination/Reconfigurator.java | 29 +++++++++--- .../cluster/SpecificMasterNodesIT.java | 13 +++-- .../coordination/CoordinatorTests.java | 3 ++ .../coordination/PublicationTests.java | 4 ++ .../coordination/ReconfiguratorTests.java | 38 +++++++++++---- .../coordination/VotingConfigurationIT.java | 41 ++++++++++++++++ 9 files changed, 170 insertions(+), 26 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java diff --git a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc index 3b416ea51d223..ccc0e99125371 100644 --- a/docs/reference/modules/discovery/adding-removing-nodes.asciidoc +++ b/docs/reference/modules/discovery/adding-removing-nodes.asciidoc @@ -72,7 +72,10 @@ The node that should be added to the exclusions list is specified using <> in place of `node_name` here. If a call to the voting configuration exclusions API fails, you can safely retry it. Only a successful response guarantees that the node has actually been removed from the -voting configuration and will not be reinstated. +voting configuration and will not be reinstated. If it's the active master that +was removed from the voting configuration, then it will abdicate to another +master-eligible node that's still in the voting configuration, if such a node +is available. Although the voting configuration exclusions API is most useful for down-scaling a two-node to a one-node cluster, it is also possible to use it to remove diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index aabe5466d69a9..4bf977f8398ce 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -112,6 +112,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final PeerFinder peerFinder; private final PreVoteCollector preVoteCollector; + private final Random random; private final ElectionSchedulerFactory electionSchedulerFactory; private final UnicastConfiguredHostsResolver configuredHostsResolver; private final TimeValue publishTimeout; @@ -153,6 +154,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.lastJoin = Optional.empty(); this.joinAccumulator = new InitialJoinAccumulator(); this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); + this.random = random; this.electionSchedulerFactory = new ElectionSchedulerFactory(settings, random, transportService.getThreadPool()); this.preVoteCollector = new PreVoteCollector(transportService, this::startElection, this::updateMaxTermSeen); configuredHostsResolver = new UnicastConfiguredHostsResolver(nodeName, settings, transportService, unicastHostsProvider); @@ -366,11 +368,33 @@ private void startElection() { } } + private void abdicateTo(DiscoveryNode newMaster) { + assert Thread.holdsLock(mutex); + assert mode == Mode.LEADER : "expected to be leader on abdication but was " + mode; + assert newMaster.isMasterNode() : "should only abdicate to master-eligible node but was " + newMaster; + final StartJoinRequest startJoinRequest = new 
StartJoinRequest(newMaster, Math.max(getCurrentTerm(), maxTermSeen) + 1); + logger.info("abdicating to {} with term {}", newMaster, startJoinRequest.getTerm()); + getLastAcceptedState().nodes().mastersFirstStream().forEach(node -> { + if (isZen1Node(node) == false) { + joinHelper.sendStartJoinRequest(startJoinRequest, node); + } + }); + // handling of start join messages on the local node will be dispatched to the generic thread-pool + assert mode == Mode.LEADER : "should still be leader after sending abdication messages " + mode; + // explicitly move node to candidate state so that the next cluster state update task yields an onNoLongerMaster event + becomeCandidate("after abdicating to " + newMaster); + } + private static boolean electionQuorumContainsLocalNode(ClusterState lastAcceptedState) { - final String localNodeId = lastAcceptedState.nodes().getLocalNodeId(); - assert localNodeId != null; - return lastAcceptedState.getLastCommittedConfiguration().getNodeIds().contains(localNodeId) - || lastAcceptedState.getLastAcceptedConfiguration().getNodeIds().contains(localNodeId); + final DiscoveryNode localNode = lastAcceptedState.nodes().getLocalNode(); + assert localNode != null; + return electionQuorumContains(lastAcceptedState, localNode); + } + + private static boolean electionQuorumContains(ClusterState lastAcceptedState, DiscoveryNode node) { + final String nodeId = node.getId(); + return lastAcceptedState.getLastCommittedConfiguration().getNodeIds().contains(nodeId) + || lastAcceptedState.getLastAcceptedConfiguration().getNodeIds().contains(nodeId); } private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTerm) { @@ -780,7 +804,7 @@ ClusterState improveConfiguration(ClusterState clusterState) { .filter(this::hasJoinVoteFrom).filter(discoveryNode -> isZen1Node(discoveryNode) == false).collect(Collectors.toSet()); final VotingConfiguration newConfig = reconfigurator.reconfigure(liveNodes, clusterState.getVotingConfigExclusions().stream().map(VotingConfigExclusion::getNodeId).collect(Collectors.toSet()), - clusterState.getLastAcceptedConfiguration()); + getLocalNode(), clusterState.getLastAcceptedConfiguration()); if (newConfig.equals(clusterState.getLastAcceptedConfiguration()) == false) { assert coordinationState.get().joinVotesHaveQuorumFor(newConfig); return ClusterState.builder(clusterState).metaData(MetaData.builder(clusterState.metaData()) @@ -1192,7 +1216,18 @@ public void onSuccess(String source) { updateMaxTermSeen(getCurrentTerm()); if (mode == Mode.LEADER) { - scheduleReconfigurationIfNeeded(); + final ClusterState state = getLastAcceptedState(); // committed state + if (electionQuorumContainsLocalNode(state) == false) { + final List masterCandidates = completedNodes().stream() + .filter(DiscoveryNode::isMasterNode) + .filter(node -> electionQuorumContains(state, node)) + .collect(Collectors.toList()); + if (masterCandidates.isEmpty() == false) { + abdicateTo(masterCandidates.get(random.nextInt(masterCandidates.size()))); + } + } else { + scheduleReconfigurationIfNeeded(); + } } lagDetector.startLagDetector(publishRequest.getAcceptedState().version()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 4aea820d6d9e0..da7c1d02a1e0b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -36,6 +36,7 @@ import 
java.util.Optional; import java.util.Set; import java.util.function.LongSupplier; +import java.util.stream.Collectors; public abstract class Publication { @@ -92,6 +93,13 @@ public void onFaultyNode(DiscoveryNode faultyNode) { onPossibleCompletion(); } + public List completedNodes() { + return publicationTargets.stream() + .filter(PublicationTarget::isSuccessfullyCompleted) + .map(PublicationTarget::getDiscoveryNode) + .collect(Collectors.toList()); + } + public boolean isCommitted() { return applyCommitRequest.isPresent(); } @@ -268,6 +276,10 @@ void onFaultyNode(DiscoveryNode faultyNode) { } } + DiscoveryNode getDiscoveryNode() { + return discoveryNode; + } + private void ackOnce(Exception e) { if (ackIsPending) { ackIsPending = false; @@ -280,6 +292,10 @@ boolean isActive() { && state != PublicationTargetState.APPLIED_COMMIT; } + boolean isSuccessfullyCompleted() { + return state == PublicationTargetState.APPLIED_COMMIT; + } + boolean isWaitingForQuorum() { return state == PublicationTargetState.WAITING_FOR_QUORUM; } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java index 5c7b9562d8d8a..ebca37bdac0b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Reconfigurator.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.util.set.Sets; import java.util.Collection; +import java.util.Collections; import java.util.Set; import java.util.TreeSet; import java.util.stream.Collectors; @@ -90,18 +91,23 @@ public String toString() { * @param retiredNodeIds Nodes that are leaving the cluster and which should not appear in the configuration if possible. Nodes that are * retired and not in the current configuration will never appear in the resulting configuration; this is useful * for shifting the vote in a 2-node cluster so one of the nodes can be restarted without harming availability. + * @param currentMaster The current master. Unless retired, we prefer to keep the current master in the config. * @param currentConfig The current configuration. As far as possible, we prefer to keep the current config as-is. * @return An optimal configuration, or leave the current configuration unchanged if the optimal configuration has no live quorum. */ - public VotingConfiguration reconfigure(Set liveNodes, Set retiredNodeIds, VotingConfiguration currentConfig) { + public VotingConfiguration reconfigure(Set liveNodes, Set retiredNodeIds, DiscoveryNode currentMaster, + VotingConfiguration currentConfig) { assert liveNodes.stream().noneMatch(Coordinator::isZen1Node) : liveNodes; - logger.trace("{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}", this, currentConfig, liveNodes, retiredNodeIds); + assert liveNodes.contains(currentMaster) : "liveNodes = " + liveNodes + " master = " + currentMaster; + logger.trace("{} reconfiguring {} based on liveNodes={}, retiredNodeIds={}, currentMaster={}", + this, currentConfig, liveNodes, retiredNodeIds, currentMaster); /* * There are three true/false properties of each node in play: live/non-live, retired/non-retired and in-config/not-in-config. 
* Firstly we divide the nodes into disjoint sets based on these properties: * - * - nonRetiredInConfigNotLiveIds + * - nonRetiredMaster + * - nonRetiredNotMasterInConfigNotLiveIds * - nonRetiredInConfigLiveIds * - nonRetiredLiveNotInConfigIds * @@ -125,6 +131,17 @@ public VotingConfiguration reconfigure(Set liveNodes, Set final Set nonRetiredInConfigLiveIds = new TreeSet<>(liveInConfigIds); nonRetiredInConfigLiveIds.removeAll(retiredNodeIds); + final Set nonRetiredInConfigLiveMasterIds; + final Set nonRetiredInConfigLiveNotMasterIds; + if (nonRetiredInConfigLiveIds.contains(currentMaster.getId())) { + nonRetiredInConfigLiveNotMasterIds = new TreeSet<>(nonRetiredInConfigLiveIds); + nonRetiredInConfigLiveNotMasterIds.remove(currentMaster.getId()); + nonRetiredInConfigLiveMasterIds = Collections.singleton(currentMaster.getId()); + } else { + nonRetiredInConfigLiveNotMasterIds = nonRetiredInConfigLiveIds; + nonRetiredInConfigLiveMasterIds = Collections.emptySet(); + } + final Set nonRetiredLiveNotInConfigIds = Sets.sortedDifference(liveNodeIds, currentConfig.getNodeIds()); nonRetiredLiveNotInConfigIds.removeAll(retiredNodeIds); @@ -151,9 +168,9 @@ public VotingConfiguration reconfigure(Set liveNodes, Set * The new configuration is formed by taking this many nodes in the following preference order: */ final VotingConfiguration newConfig = new VotingConfiguration( - // live nodes first, preferring the current config, and if we need more then use non-live nodes - Stream.of(nonRetiredInConfigLiveIds, nonRetiredLiveNotInConfigIds, nonRetiredInConfigNotLiveIds) - .flatMap(Collection::stream).limit(targetSize).collect(Collectors.toSet())); + // live master first, then other live nodes, preferring the current config, and if we need more then use non-live nodes + Stream.of(nonRetiredInConfigLiveMasterIds, nonRetiredInConfigLiveNotMasterIds, nonRetiredLiveNotInConfigIds, + nonRetiredInConfigNotLiveIds).flatMap(Collection::stream).limit(targetSize).collect(Collectors.toSet())); if (newConfig.hasQuorum(liveNodeIds)) { return newConfig; diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index aaef1e58fb50e..aaf01b5e6e079 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -31,12 +31,12 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutionException; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -106,7 +106,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } - public void testElectOnlyBetweenMasterNodes() throws IOException, ExecutionException, InterruptedException { + public void testElectOnlyBetweenMasterNodes() throws Exception { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) 
.put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); @@ -138,7 +138,14 @@ public void testElectOnlyBetweenMasterNodes() throws IOException, ExecutionExcep logger.info("--> closing master node (1)"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{masterNodeName})).get(); - internalCluster().stopCurrentMasterNode(); + // removing the master from the voting configuration immediately triggers the master to step down + assertBusy(() -> { + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + }); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodeName)); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); assertThat(internalCluster().masterClient().admin().cluster().prepareState() diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index be40f0c888362..36495914bddec 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -66,6 +66,7 @@ import org.elasticsearch.test.disruption.DisruptableMockTransport.ConnectionStatus; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matcher; +import org.hamcrest.core.IsCollectionContaining; import org.junit.After; import org.junit.Before; @@ -1331,6 +1332,8 @@ void stabilise(long stabilisationDurationMillis) { final VotingConfiguration lastCommittedConfiguration = lastAcceptedState.getLastCommittedConfiguration(); assertTrue(connectedNodeIds + " should be a quorum of " + lastCommittedConfiguration, lastCommittedConfiguration.hasQuorum(connectedNodeIds)); + assertThat("leader " + leader.getLocalNode() + " should be part of voting configuration " + lastCommittedConfiguration, + lastCommittedConfiguration.getNodeIds(), IsCollectionContaining.hasItem(leader.getLocalNode().getId())); assertThat("no reconfiguration is in progress", lastAcceptedState.getLastCommittedConfiguration(), equalTo(lastAcceptedState.getLastAcceptedConfiguration())); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java index 658250bc7a4da..d332888c185ac 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java @@ -56,6 +56,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; public class PublicationTests extends ESTestCase { @@ -178,6 +179,7 @@ public void testSimpleClusterStatePublishing() throws InterruptedException { discoveryNodes, singleNodeConfig, singleNodeConfig, 42L), ackListener, Collections.emptySet()); 
assertThat(publication.pendingPublications.keySet(), equalTo(discoNodes)); + assertThat(publication.completedNodes(), empty()); assertTrue(publication.pendingCommits.isEmpty()); AtomicBoolean processedNode1PublishResponse = new AtomicBoolean(); boolean delayProcessingNode2PublishResponse = randomBoolean(); @@ -232,10 +234,12 @@ public void testSimpleClusterStatePublishing() throws InterruptedException { assertFalse(publication.completed); assertFalse(publication.committed); + assertThat(publication.completedNodes(), containsInAnyOrder(n1, n3)); publication.pendingCommits.get(n2).onResponse(TransportResponse.Empty.INSTANCE); } assertTrue(publication.completed); + assertThat(publication.completedNodes(), containsInAnyOrder(n1, n2, n3)); assertTrue(publication.committed); assertThat(ackListener.await(0L, TimeUnit.SECONDS), containsInAnyOrder(n1, n2, n3)); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ReconfiguratorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ReconfiguratorTests.java index 7e7c7adbe1af9..bbd9514222c77 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ReconfiguratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ReconfiguratorTests.java @@ -31,6 +31,7 @@ import org.junit.Before; import java.util.Arrays; +import java.util.Comparator; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -52,6 +53,7 @@ public void testReconfigurationExamples() { check(nodes("a"), conf("a"), true, conf("a")); check(nodes("a", "b"), conf("a"), true, conf("a")); + check(nodes("a", "b"), conf("b"), true, conf("b")); check(nodes("a", "b"), conf("a", "c"), true, conf("a")); check(nodes("a", "b"), conf("a", "b"), true, conf("a")); check(nodes("a", "b"), conf("a", "b", "e"), true, conf("a", "b", "e")); @@ -64,6 +66,7 @@ public void testReconfigurationExamples() { check(nodes("a", "b", "c", "d"), conf("a", "b", "e"), true, conf("a", "b", "c")); check(nodes("a", "b", "c", "d", "e"), conf("a", "f", "g"), true, conf("a", "b", "c", "d", "e")); check(nodes("a", "b", "c", "d"), conf("a", "b", "c", "d", "e"), true, conf("a", "b", "c")); + check(nodes("e", "a", "b", "c"), retired(), "e", conf("a", "b", "c", "d", "e"), true, conf("a", "b", "e")); check(nodes("a", "b", "c"), conf("a", "b", "c", "d", "e"), true, conf("a", "b", "c")); check(nodes("a"), conf("a"), false, conf("a")); @@ -124,7 +127,8 @@ public void testAutoShrinking() { final int quorumSize = Math.max(liveNodes.length / 2 + 1, initialVotingNodes.length < 3 ? 
1 : 2); - final VotingConfiguration finalConfig = reconfigurator.reconfigure(liveNodesSet, emptySet(), initialConfig); + final VotingConfiguration finalConfig = reconfigurator.reconfigure(liveNodesSet, emptySet(), + randomFrom(liveNodesSet), initialConfig); final String description = "reconfigure " + liveNodesSet + " from " + initialConfig + " yielded " + finalConfig; @@ -152,7 +156,8 @@ public void testManualShrinking() { final int quorumSize = Math.max(liveNodes.length, initialVotingNodes.length) / 2 + 1; - final VotingConfiguration finalConfig = reconfigurator.reconfigure(liveNodesSet, emptySet(), initialConfig); + final VotingConfiguration finalConfig = reconfigurator.reconfigure(liveNodesSet, emptySet(), randomFrom(liveNodesSet), + initialConfig); final String description = "reconfigure " + liveNodesSet + " from " + initialConfig + " yielded " + finalConfig; @@ -187,13 +192,20 @@ private void check(Set liveNodes, VotingConfiguration config, boo private void check(Set liveNodes, Set retired, VotingConfiguration config, boolean autoShrinkVotingConfiguration, VotingConfiguration expectedConfig) { + final DiscoveryNode master = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get(); + check(liveNodes, retired, master.getId(), config, autoShrinkVotingConfiguration, expectedConfig); + } + + private void check(Set liveNodes, Set retired, String masterId, VotingConfiguration config, + boolean autoShrinkVotingConfiguration, VotingConfiguration expectedConfig) { final Reconfigurator reconfigurator = makeReconfigurator(Settings.builder() .put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration) .build()); - final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, config); - assertEquals(new ParameterizedMessage("[liveNodes={}, retired={}, config={}, autoShrinkVotingConfiguration={}]", - liveNodes, retired, config, autoShrinkVotingConfiguration).getFormattedMessage(), + final DiscoveryNode master = liveNodes.stream().filter(n -> n.getId().equals(masterId)).findFirst().get(); + final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, master, config); + assertEquals(new ParameterizedMessage("[liveNodes={}, retired={}, master={}, config={}, autoShrinkVotingConfiguration={}]", + liveNodes, retired, master, config, autoShrinkVotingConfiguration).getFormattedMessage(), expectedConfig, adaptedConfig); } @@ -206,18 +218,24 @@ public void testDynamicSetting() { final Reconfigurator reconfigurator = new Reconfigurator(Settings.EMPTY, clusterSettings); final VotingConfiguration initialConfig = conf("a", "b", "c", "d", "e"); + Set twoNodes = nodes("a", "b"); + Set threeNodes = nodes("a", "b", "c"); + // default is "true" - assertThat(reconfigurator.reconfigure(nodes("a", "b"), retired(), initialConfig), equalTo(conf("a", "b", "c"))); + assertThat(reconfigurator.reconfigure(twoNodes, retired(), randomFrom(twoNodes), initialConfig), equalTo(conf("a", "b", "c"))); // update to "false" clusterSettings.applySettings(Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), "false").build()); - assertThat(reconfigurator.reconfigure(nodes("a", "b"), retired(), initialConfig), sameInstance(initialConfig)); // no quorum - assertThat(reconfigurator.reconfigure(nodes("a", "b", "c"), retired(), initialConfig), equalTo(conf("a", "b", "c", "d", "e"))); - assertThat(reconfigurator.reconfigure(nodes("a", "b", "c"), retired("d"), initialConfig), equalTo(conf("a", "b", "c", "e"))); + 
assertThat(reconfigurator.reconfigure(twoNodes, retired(), randomFrom(twoNodes), initialConfig), + sameInstance(initialConfig)); // no quorum + assertThat(reconfigurator.reconfigure(threeNodes, retired(), randomFrom(threeNodes), initialConfig), + equalTo(conf("a", "b", "c", "d", "e"))); + assertThat(reconfigurator.reconfigure(threeNodes, retired("d"), randomFrom(threeNodes), initialConfig), + equalTo(conf("a", "b", "c", "e"))); // explicitly set to "true" clusterSettings.applySettings(Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), "true").build()); - assertThat(reconfigurator.reconfigure(nodes("a", "b"), retired(), initialConfig), equalTo(conf("a", "b", "c"))); + assertThat(reconfigurator.reconfigure(twoNodes, retired(), randomFrom(twoNodes), initialConfig), equalTo(conf("a", "b", "c"))); expectThrows(IllegalArgumentException.class, () -> clusterSettings.applySettings(Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), "blah").build())); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java new file mode 100644 index 0000000000000..8c6775cb6c91e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/VotingConfigurationIT.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.coordination; + +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.common.Priority; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.concurrent.ExecutionException; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class VotingConfigurationIT extends ESIntegTestCase { + + public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException { + internalCluster().startNodes(2); + final String originalMaster = internalCluster().getMasterName(); + + logger.info("--> excluding master node {}", originalMaster); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, + new AddVotingConfigExclusionsRequest(new String[]{originalMaster})).get(); + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + assertNotEquals(originalMaster, internalCluster().getMasterName()); + } +} From 460f10ce602f067816c2615f035a3d42f42d3ed1 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 29 Jan 2019 13:15:58 +0100 Subject: [PATCH 48/57] Close Index API should force a flush if a sync is needed (#37961) This commit changes the TransportVerifyShardBeforeCloseAction so that it issues a forced flush, forcing the translog and the Lucene commit to contain the same max seq number and global checkpoint in the case the Translog contains operations that were not written in the IndexWriter (like a Delete that touches a non existing doc). This way the assertion added in #37426 won't trip. Related to #33888 --- ...TransportVerifyShardBeforeCloseAction.java | 9 ++++-- .../index/engine/FrozenIndexTests.java | 30 +++++++++++++++++++ 2 files changed, 37 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index f08f6ea7dffa2..a36a012f397ec 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.action.admin.indices.close; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.support.ActionFilters; @@ -50,6 +52,7 @@ public class TransportVerifyShardBeforeCloseAction extends TransportReplicationA TransportVerifyShardBeforeCloseAction.ShardRequest, TransportVerifyShardBeforeCloseAction.ShardRequest, ReplicationResponse> { public static final String NAME = CloseIndexAction.NAME + "[s]"; + protected Logger logger = LogManager.getLogger(getClass()); @Inject public TransportVerifyShardBeforeCloseAction(final Settings settings, final TransportService transportService, @@ -111,8 +114,10 @@ private void executeShardOperation(final ShardRequest request, final IndexShard throw new IllegalStateException("Global checkpoint [" + indexShard.getGlobalCheckpoint() + "] mismatches maximum sequence number [" + maxSeqNo + "] on index shard " + shardId); } - indexShard.flush(new FlushRequest()); - 
logger.debug("{} shard is ready for closing", shardId); + + final boolean forced = indexShard.isSyncNeeded(); + indexShard.flush(new FlushRequest().force(forced)); + logger.trace("{} shard is ready for closing [forced:{}]", shardId, forced); } @Override diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index c0493b6efd1fe..094c79efb50a9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.IndicesOptions; @@ -26,6 +27,7 @@ import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; @@ -46,6 +48,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; public class FrozenIndexTests extends ESSingleNodeTestCase { @@ -340,4 +344,30 @@ public void testFreezeIndexIncreasesIndexSettingsVersion() throws ExecutionExcep assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(), equalTo(settingsVersion + 1)); } + + public void testFreezeEmptyIndexWithTranslogOps() throws Exception { + final String indexName = "empty"; + createIndex(indexName, Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.refresh_interval", TimeValue.MINUS_ONE) + .build()); + + final long nbNoOps = randomIntBetween(1, 10); + for (long i = 0; i < nbNoOps; i++) { + final DeleteResponse deleteResponse = client().prepareDelete(indexName, "_doc", Long.toString(i)).get(); + assertThat(deleteResponse.status(), is(RestStatus.NOT_FOUND)); + } + + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + assertBusy(() -> { + final Index index = client().admin().cluster().prepareState().get().getState().metaData().index(indexName).getIndex(); + final IndexService indexService = indicesService.indexService(index); + assertThat(indexService.hasShard(0), is(true)); + assertThat(indexService.getShard(0).getGlobalCheckpoint(), greaterThanOrEqualTo(nbNoOps - 1L)); + }); + + assertAcked(new XPackClient(client()).freeze(new TransportFreezeIndexAction.FreezeRequest(indexName))); + assertIndexFrozen(indexName); + } } From 4f4113e96474efde4ebeee351ff947ae22f6723d Mon Sep 17 00:00:00 2001 From: Przemyslaw Gomulka Date: Tue, 29 Jan 2019 14:53:55 +0100 Subject: [PATCH 49/57] Rename security audit.log to _audit.json (#37916) in order to keep json logs consistent the security audit logs are renamed from 
`.log` to `.json`. Relates to #32850 --- docs/reference/settings/audit-settings.asciidoc | 2 +- x-pack/docs/en/security/auditing/event-types.asciidoc | 2 +- x-pack/docs/en/security/auditing/output-logfile.asciidoc | 8 ++++---- x-pack/docs/en/security/auditing/overview.asciidoc | 2 +- x-pack/docs/en/security/configuring-es.asciidoc | 2 +- x-pack/plugin/core/src/main/config/log4j2.properties | 4 ++-- x-pack/plugin/sql/qa/security/build.gradle | 2 +- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index 483c889ce5898..d4762d9f42fb1 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -14,7 +14,7 @@ file. For more information, see `xpack.security.audit.enabled`:: Set to `true` to enable auditing on the node. The default value is `false`. -This puts the auditing events in a dedicated file named `_audit.log` +This puts the auditing events in a dedicated file named `_audit.json` on each node. For more information, see <>. [[event-audit-settings]] diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index ace4d7d4c09c8..19947e40b5553 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -48,7 +48,7 @@ The following is a list of the events that can be generated: In 6.5.0, there is a new <> format. This format also brings in a few changes for audit event attributes. -The new format is output to the `_audit.log` file. +The new format is output to the `_audit.json` file. The audit entries are formatted as flat JSON documents (that is to say, no nested objects), one per line. Hence, the attribute names are JSON keys and they follow a dotted name syntax. Any attributes that lack a value (`null`) are not diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc index ac7128852b942..f5b1dbad79ae9 100644 --- a/x-pack/docs/en/security/auditing/output-logfile.asciidoc +++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc @@ -3,7 +3,7 @@ === Logfile audit output The `logfile` audit output is the default output for auditing. It writes data to -the `_audit.log` file in the logs directory. To maintain +the `_audit.json` file in the logs directory. To maintain compatibility with releases prior to 6.5.0, a `_access.log` file is also generated. They differ in the output format but the contents are similar. For systems that are not ingesting the audit file for search or @@ -43,7 +43,7 @@ by default points to the `elasticsearch.log` file. [[audit-log-entry-format]] === Log entry format -The log entries in the `_audit.log` file have the following format: +The log entries in the `_audit.json` file have the following format: - Each log entry is a one line JSON document and each one is printed on a separate line. - The fields of a log entry are ordered. However, if a field does not have a value it @@ -100,14 +100,14 @@ audited in plain text when including the request body in audit events. [[logging-file]] You can also configure how the logfile is written in the `log4j2.properties` file located in `ES_PATH_CONF`.
By default, audit information is appended to the -`_audit.log` file located in the standard Elasticsearch `logs` directory +`_audit.json` file located in the standard Elasticsearch `logs` directory (typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. The deprecated logfile audit format (`_access.log`) can be disabled from the same `log4j2.properties` file (hint: look for the comment instructing to set the log level to `off`). The deprecated format is a duplication of information that is in place to assure backwards compatibility. If you are not strict about the audit format it is strongly recommended to only use the -`_audit.log` log appender. +`_audit.json` log appender. [float] [[audit-log-ignore-policy]] diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index f0b58684e078f..51f24c9bcced0 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -13,5 +13,5 @@ Audit logs are **disabled** by default. To enable this functionality, you must set `xpack.security.audit.enabled` to `true` in `elasticsearch.yml`. ============================================================================ -The audit log persists events to a dedicated `_audit.log` file on +The audit log persists events to a dedicated `_audit.json` file on the host's file system (on each node). diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index db2c8c664b9d2..e1dab76293c5f 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -131,7 +131,7 @@ and <>. .. Restart {es}. -Events are logged to a dedicated `_audit.log` file in +Events are logged to a dedicated `_audit.json` file in `ES_HOME/logs`, on each cluster node. 
-- diff --git a/x-pack/plugin/core/src/main/config/log4j2.properties b/x-pack/plugin/core/src/main/config/log4j2.properties index 52b6ce3950690..2b7e112eb14fc 100644 --- a/x-pack/plugin/core/src/main/config/log4j2.properties +++ b/x-pack/plugin/core/src/main/config/log4j2.properties @@ -1,6 +1,6 @@ appender.audit_rolling.type = RollingFile appender.audit_rolling.name = audit_rolling -appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.log +appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json appender.audit_rolling.layout.type = PatternLayout appender.audit_rolling.layout.pattern = {\ "@timestamp":"%d{ISO8601}"\ @@ -64,7 +64,7 @@ appender.audit_rolling.layout.pattern = {\ # "rule" name of the applied rulee if the "origin.type" is "ip_filter" # "event.category" fixed value "elasticsearch-audit" -appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.log +appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}.json appender.audit_rolling.policies.type = Policies appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy appender.audit_rolling.policies.time.interval = 1 diff --git a/x-pack/plugin/sql/qa/security/build.gradle b/x-pack/plugin/sql/qa/security/build.gradle index 69389b47accec..d9bffd393641d 100644 --- a/x-pack/plugin/sql/qa/security/build.gradle +++ b/x-pack/plugin/sql/qa/security/build.gradle @@ -42,7 +42,7 @@ subprojects { integTestRunner { systemProperty 'tests.audit.logfile', - "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit.log" + "${ -> integTest.nodes[0].homeDir}/logs/${ -> integTest.nodes[0].clusterName }_audit.json" } runqa { From a6d4838a6779b4073008dc1f2e75a3ffb7cdfe0f Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 29 Jan 2019 14:56:22 +0100 Subject: [PATCH 50/57] Clean up allowPartialSearchResults serialization (#37911) When serializing allowPartialSearchResults to the shards through ShardSearchTransportRequest, we use an optional boolean field, though the corresponding instance member is declared `boolean` which can never be null. We also have an assert to verify that the incoming search request provides a non-null value for the flag, and a comment explaining that null should be considered a bug. This commit makes the allowPartialSearchResults method in ShardSearchRequest return a `boolean` rather than a `Boolean` and changes the serialization from optional to non optional, in a bw comp manner. 
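The version-gated wire handling boils down to the following read path (a condensed excerpt of the change below; the write path mirrors it):

    // 7.0+ peers exchange a plain boolean; 6.3-6.x peers keep the optional
    // encoding they already understand (the value is never null in practice);
    // pre-6.3 peers never sent the flag, so the field keeps its default there.
    if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
        allowPartialSearchResults = in.readBoolean();
    } else if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
        allowPartialSearchResults = in.readOptionalBoolean();
    }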
--- .../search/internal/ShardSearchLocalRequest.java | 11 +++++++---- .../search/internal/ShardSearchRequest.java | 2 +- .../internal/ShardSearchTransportRequest.java | 2 +- .../elasticsearch/index/SearchSlowLogTests.java | 4 ++-- .../internal/ShardSearchTransportRequestTests.java | 14 ++++++++++++++ .../search/slice/SliceBuilderTests.java | 4 ++-- 6 files changed, 27 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 0921681124e33..08cdd2fc0dc16 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -170,7 +170,7 @@ public Boolean requestCache() { } @Override - public Boolean allowPartialSearchResults() { + public boolean allowPartialSearchResults() { return allowPartialSearchResults; } @@ -216,7 +216,9 @@ protected void innerReadFrom(StreamInput in) throws IOException { nowInMillis = in.readVLong(); requestCache = in.readOptionalBoolean(); clusterAlias = in.readOptionalString(); - if (in.getVersion().onOrAfter(Version.V_6_3_0)) { + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + allowPartialSearchResults = in.readBoolean(); + } else if (in.getVersion().onOrAfter(Version.V_6_3_0)) { allowPartialSearchResults = in.readOptionalBoolean(); } if (in.getVersion().onOrAfter(Version.V_6_4_0)) { @@ -244,7 +246,9 @@ protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException } out.writeOptionalBoolean(requestCache); out.writeOptionalString(clusterAlias); - if (out.getVersion().onOrAfter(Version.V_6_3_0)) { + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(allowPartialSearchResults); + } else if (out.getVersion().onOrAfter(Version.V_6_3_0)) { out.writeOptionalBoolean(allowPartialSearchResults); } if (asKey == false) { @@ -295,5 +299,4 @@ public Rewriteable rewrite(QueryRewriteContext ctx) throws IOException { } } } - } diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 3fc16584eb0bf..85800a8066c38 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -69,7 +69,7 @@ public interface ShardSearchRequest { Boolean requestCache(); - Boolean allowPartialSearchResults(); + boolean allowPartialSearchResults(); Scroll scroll(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 59d1c2e089e02..d1ff306e3c848 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -150,7 +150,7 @@ public Boolean requestCache() { } @Override - public Boolean allowPartialSearchResults() { + public boolean allowPartialSearchResults() { return shardSearchLocalRequest.allowPartialSearchResults(); } diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index fc24bdf9691de..6c510b3a9bd70 100644 --- 
a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -114,8 +114,8 @@ public Boolean requestCache() { } @Override - public Boolean allowPartialSearchResults() { - return null; + public boolean allowPartialSearchResults() { + return true; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java index da987a657260a..5cf7880ee32b7 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchTransportRequestTests.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.InvalidAliasNameException; import org.elasticsearch.search.AbstractSearchTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; @@ -76,6 +77,19 @@ public void testSerialization() throws Exception { assertEquals(deserializedRequest.getAliasFilter(), shardSearchTransportRequest.getAliasFilter()); assertEquals(deserializedRequest.indexBoost(), shardSearchTransportRequest.indexBoost(), 0.0f); assertEquals(deserializedRequest.getClusterAlias(), shardSearchTransportRequest.getClusterAlias()); + assertEquals(shardSearchTransportRequest.allowPartialSearchResults(), deserializedRequest.allowPartialSearchResults()); + } + + public void testAllowPartialResultsSerializationPre7_0_0() throws IOException { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0)); + ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest(); + ShardSearchTransportRequest deserializedRequest = + copyWriteable(shardSearchTransportRequest, namedWriteableRegistry, ShardSearchTransportRequest::new, version); + if (version.before(Version.V_6_3_0)) { + assertFalse(deserializedRequest.allowPartialSearchResults()); + } else { + assertEquals(shardSearchTransportRequest.allowPartialSearchResults(), deserializedRequest.allowPartialSearchResults()); + } } private ShardSearchTransportRequest createShardSearchTransportRequest() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java index 30ed0cb5ab5b5..04fd5947e5f6d 100644 --- a/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/slice/SliceBuilderTests.java @@ -163,8 +163,8 @@ public Boolean requestCache() { } @Override - public Boolean allowPartialSearchResults() { - return null; + public boolean allowPartialSearchResults() { + return true; } @Override From 42eec55837f99167b36ebc2dbffb3d6bd52d7279 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 29 Jan 2019 14:57:33 +0100 Subject: [PATCH 51/57] Replace failure.get().addSuppressed with failure.accumulateAndGet() (#37649) Also add a test for concurrent incoming failures --- .../action/support/GroupedActionListener.java | 5 +- .../support/GroupedActionListenerTests.java | 52 +++++++++++++------ 2 files changed, 39 insertions(+), 18 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java index 
ed9b7c8d15d60..532396ee6095c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java @@ -72,7 +72,10 @@ public void onResponse(T element) { @Override public void onFailure(Exception e) { if (failure.compareAndSet(null, e) == false) { - failure.get().addSuppressed(e); + failure.accumulateAndGet(e, (previous, current) -> { + previous.addSuppressed(current); + return previous; + }); } if (countDown.countDown()) { delegate.onFailure(failure.get()); diff --git a/server/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java index 2af2da7ba0939..9f6454d4e4bcf 100644 --- a/server/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java @@ -26,10 +26,14 @@ import java.util.Collection; import java.util.Collections; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.CoreMatchers.instanceOf; + public class GroupedActionListenerTests extends ESTestCase { public void testNotifications() throws InterruptedException { @@ -55,20 +59,17 @@ public void onFailure(Exception e) { Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads); for (int i = 0; i < numThreads; i++) { - threads[i] = new Thread() { - @Override - public void run() { - try { - barrier.await(10, TimeUnit.SECONDS); - } catch (Exception e) { - throw new AssertionError(e); - } - int c = 0; - while((c = count.incrementAndGet()) <= groupSize) { - listener.onResponse(c-1); - } + threads[i] = new Thread(() -> { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + int c = 0; + while((c = count.incrementAndGet()) <= groupSize) { + listener.onResponse(c-1); } - }; + }); threads[i].start(); } for (Thread t : threads) { @@ -100,11 +101,9 @@ public void onFailure(Exception e) { excRef.set(e); } }; - Collection defaults = randomBoolean() ? Collections.singletonList(-1) : - Collections.emptyList(); + Collection defaults = randomBoolean() ? 
Collections.singletonList(-1) : Collections.emptyList(); int size = randomIntBetween(3, 4); - GroupedActionListener listener = new GroupedActionListener<>(result, size, - defaults); + GroupedActionListener listener = new GroupedActionListener<>(result, size, defaults); listener.onResponse(0); IOException ioException = new IOException(); RuntimeException rtException = new RuntimeException(); @@ -121,4 +120,23 @@ public void onFailure(Exception e) { listener.onResponse(1); assertNull(resRef.get()); } + + public void testConcurrentFailures() throws InterruptedException { + AtomicReference finalException = new AtomicReference<>(); + int numGroups = randomIntBetween(10, 100); + GroupedActionListener listener = new GroupedActionListener<>( + ActionListener.wrap(r -> {}, finalException::set), numGroups, Collections.emptyList()); + ExecutorService executorService = Executors.newFixedThreadPool(numGroups); + for (int i = 0; i < numGroups; i++) { + executorService.submit(() -> listener.onFailure(new IOException())); + } + + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + + Exception exception = finalException.get(); + assertNotNull(exception); + assertThat(exception, instanceOf(IOException.class)); + assertEquals(numGroups - 1, exception.getSuppressed().length); + } } From 2325fb9cb3530607475233e7c7281e5ff7241001 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 29 Jan 2019 14:58:11 +0100 Subject: [PATCH 52/57] Remove test-only SearchShardTarget constructor (#37912) Remove the test-only SearchShardTarget constructor and replace all usages with calls to the other constructor, which accepts a ShardId. --- .../DiscountedCumulativeGainTests.java | 9 ++--- .../index/rankeval/EvalQueryQualityTests.java | 5 +-- .../rankeval/ExpectedReciprocalRankTests.java | 5 +-- .../rankeval/MeanReciprocalRankTests.java | 5 +-- .../index/rankeval/PrecisionAtKTests.java | 9 ++--- .../index/rankeval/RankEvalResponseTests.java | 7 ++-- .../search/SearchShardTarget.java | 6 ---- .../ElasticsearchExceptionTests.java | 26 +++++++-------- .../ExceptionSerializationTests.java | 3 +- .../action/search/CountedCollectorTests.java | 9 ++--- .../action/search/DfsQueryPhaseTests.java | 33 ++++++++++--------- .../action/search/FetchSearchPhaseTests.java | 26 +++++++++------ .../search/SearchPhaseControllerTests.java | 26 +++++++++------ .../SearchPhaseExecutionExceptionTests.java | 13 ++++---- .../search/SearchScrollAsyncActionTests.java | 11 ++++--- .../rest/BytesRestResponseTests.java | 7 ++-- .../SignificanceHeuristicTests.java | 6 ++-- .../search/MockSearchServiceTests.java | 5 +-- .../xpack/watcher/WatcherServiceTests.java | 3 +- .../CompareConditionSearchTests.java | 5 +-- .../execution/TriggeredWatchStoreTests.java | 5 +-- 21 files changed, 124 insertions(+), 100 deletions(-) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index 468a1ac2e5721..0e2d2b482e0b4 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -73,7 +74,7 @@ public void testDCGAt() { for (int i = 0; i < 6; i++) { rated.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); hits[i] = new SearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); assertEquals(EXPECTED_DCG, dcg.evaluate("id", hits, rated).metricScore(), DELTA); @@ -123,7 +124,7 @@ public void testDCGAtSixMissingRatings() { } } hits[i] = new SearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); EvalQueryQuality result = dcg.evaluate("id", hits, rated); @@ -180,7 +181,7 @@ public void testDCGAtFourMoreRatings() { SearchHit[] hits = new SearchHit[4]; for (int i = 0; i < 4; i++) { hits[i] = new SearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); EvalQueryQuality result = dcg.evaluate("id", hits, ratedDocs); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java index 7424542ac26aa..bfb3e3d55158f 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/EvalQueryQualityTests.java @@ -19,13 +19,14 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -56,7 +57,7 @@ public static EvalQueryQuality randomEvalQueryQuality() { for (int i = 0; i < numberOfSearchHits; i++) { RatedSearchHit ratedSearchHit = RatedSearchHitTests.randomRatedSearchHit(); // we need to associate each hit with an index name otherwise rendering will not work - ratedSearchHit.getSearchHit().shard(new SearchShardTarget("_na_", new Index("index", 
"_na_"), 0, null)); + ratedSearchHit.getSearchHit().shard(new SearchShardTarget("_na_", new ShardId("index", "_na_", 0), null, OriginalIndices.NONE)); ratedHits.add(ratedSearchHit); } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(randomAlphaOfLength(10), diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java index fe33c246f7d7a..3906b05ce9d60 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/ExpectedReciprocalRankTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; @@ -29,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -116,7 +117,7 @@ private SearchHit[] createSearchHits(List rated, Integer[] releva rated.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); } hits[i] = new SearchHit(i, Integer.toString(i), new Text("type"), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } return hits; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index fdb64806d5c9e..6b37fee3c5e8a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; @@ -29,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -204,7 +205,7 @@ private static SearchHit[] createSearchHits(int from, int to, String index) { SearchHit[] hits = new SearchHit[to + 1 - from]; for (int i = from; i <= to; i++) { hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index(index, "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); } return hits; } diff --git 
a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index b9e7bf25aaf7a..d24d8bc56d4eb 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.rankeval; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.text.Text; @@ -29,7 +30,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; @@ -111,7 +112,7 @@ public void testIgnoreUnlabeled() { // add an unlabeled search hit SearchHit[] searchHits = Arrays.copyOf(toSearchHits(rated, "test"), 3); searchHits[2] = new SearchHit(2, "2", new Text("testtype"), Collections.emptyMap()); - searchHits[2].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + searchHits[2].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", searchHits, rated); assertEquals((double) 2 / 3, evaluated.metricScore(), 0.00001); @@ -130,7 +131,7 @@ public void testNoRatedDocs() throws Exception { SearchHit[] hits = new SearchHit[5]; for (int i = 0; i < 5; i++) { hits[i] = new SearchHit(i, i + "", new Text("type"), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index("index", "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId("index", "uuid", 0), null, OriginalIndices.NONE)); } EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); assertEquals(0.0d, evaluated.metricScore(), 0.00001); @@ -252,7 +253,7 @@ private static SearchHit[] toSearchHits(List rated, String index) SearchHit[] hits = new SearchHit[rated.size()]; for (int i = 0; i < rated.size(); i++) { hits[i] = new SearchHit(i, i + "", new Text(""), Collections.emptyMap()); - hits[i].shard(new SearchShardTarget("testnode", new Index(index, "uuid"), 0, null)); + hits[i].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); } return hits; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java index c12f923aa6089..070b2439e53cb 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalResponseTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.rankeval; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.block.ClusterBlockException; @@ -37,7 +38,7 @@ import 
org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.SearchShardTarget; @@ -69,7 +70,7 @@ public class RankEvalResponseTests extends ESTestCase { new IllegalArgumentException("Closed resource", new RuntimeException("Resource")), new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] { new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)) }), + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)) }), new ElasticsearchException("Parsing failed", new ParsingException(9, 42, "Wrong state", new NullPointerException("Unexpected null value"))) }; @@ -181,7 +182,7 @@ public void testToXContent() throws IOException { private static RatedSearchHit searchHit(String index, int docId, Integer rating) { SearchHit hit = new SearchHit(docId, docId + "", new Text(""), Collections.emptyMap()); - hit.shard(new SearchShardTarget("testnode", new Index(index, "uuid"), 0, null)); + hit.shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE)); hit.score(1.0f); return new RatedSearchHit(hit, rating != null ? OptionalInt.of(rating) : OptionalInt.empty()); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 42f3b67e358e4..6aadf8425997d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/server/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.text.Text; -import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.transport.RemoteClusterAware; @@ -62,11 +61,6 @@ public SearchShardTarget(String nodeId, ShardId shardId, @Nullable String cluste this.clusterAlias = clusterAlias; } - //this constructor is only used in tests - public SearchShardTarget(String nodeId, Index index, int shardId, String clusterAlias) { - this(nodeId, new ShardId(index, shardId), clusterAlias, OriginalIndices.NONE); - } - @Nullable public String getNodeId() { return nodeId.string(); diff --git a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 4faf754b13869..21ee15d01cf81 100644 --- a/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -115,9 +115,9 @@ public void testGuessRootCause() { assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "index_not_found_exception"); assertEquals("no such index [foo]", rootCauses[0].getMessage()); ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new 
ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 2), null, OriginalIndices.NONE)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); if (randomBoolean()) { @@ -136,11 +136,11 @@ public void testGuessRootCause() { { ShardSearchFailure failure = new ShardSearchFailure( new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new QueryShardException(new Index("foo1", "_na_"), "foobar", null), - new SearchShardTarget("node_1", new Index("foo1", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo1", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure2 = new ShardSearchFailure(new QueryShardException(new Index("foo1", "_na_"), "foobar", null), - new SearchShardTarget("node_1", new Index("foo1", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo1", "_na_", 2), null, OriginalIndices.NONE)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); final ElasticsearchException[] rootCauses = ex.guessRootCauses(); @@ -187,9 +187,9 @@ public void testGuessRootCause() { public void testDeduplicate() throws IOException { { ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 2), null, OriginalIndices.NONE)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", randomBoolean() ? 
failure1.getCause() : failure.getCause(), new ShardSearchFailure[]{failure, failure1}); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -203,11 +203,11 @@ public void testDeduplicate() throws IOException { } { ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new QueryShardException(new Index("foo1", "_na_"), "foobar", null), - new SearchShardTarget("node_1", new Index("foo1", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo1", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure2 = new ShardSearchFailure(new QueryShardException(new Index("foo1", "_na_"), "foobar", null), - new SearchShardTarget("node_1", new Index("foo1", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo1", "_na_", 2), null, OriginalIndices.NONE)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -223,9 +223,9 @@ public void testDeduplicate() throws IOException { } { ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 2), null, OriginalIndices.NONE)); NullPointerException nullPointerException = new NullPointerException(); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", nullPointerException, new ShardSearchFailure[]{failure, failure1}); @@ -932,7 +932,7 @@ public static Tuple randomExceptions() { actual = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{ new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)) + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)) }); expected = new ElasticsearchException("Elasticsearch exception [type=search_phase_execution_exception, " + "reason=all shards failed]"); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 97ce870d1bada..00354fd19dacc 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -24,6 +24,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -278,7 +279,7 @@ public void testQueryShardException() throws IOException { } public void testSearchException() throws 
IOException { - SearchShardTarget target = new SearchShardTarget("foo", new Index("bar", "_na_"), 1, null); + SearchShardTarget target = new SearchShardTarget("foo", new ShardId("bar", "_na_", 1), null, OriginalIndices.NONE); SearchException ex = serialize(new SearchException(target, "hello world")); assertEquals(target, ex.shard()); assertEquals(ex.getMessage(), "hello world"); diff --git a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java index 951b5997508ca..a85f63abd0967 100644 --- a/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/CountedCollectorTests.java @@ -18,8 +18,9 @@ */ package org.elasticsearch.action.search; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -61,13 +62,13 @@ public void testCollect() throws InterruptedException { DfsSearchResult dfsSearchResult = new DfsSearchResult(shardID, null); dfsSearchResult.setShardIndex(shardID); dfsSearchResult.setSearchShardTarget(new SearchShardTarget("foo", - new Index("bar", "baz"), shardID, null)); + new ShardId("bar", "baz", shardID), null, OriginalIndices.NONE)); collector.onResult(dfsSearchResult);}); break; case 2: state.add(2); - executor.execute(() -> collector.onFailure(shardID, new SearchShardTarget("foo", new Index("bar", "baz"), - shardID, null), new RuntimeException("boom"))); + executor.execute(() -> collector.onFailure(shardID, new SearchShardTarget("foo", new ShardId("bar", "baz", shardID), + null, OriginalIndices.NONE), new RuntimeException("boom"))); break; default: fail("unknown state"); diff --git a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java index c7a3137230405..8a8e28e15f20d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/DfsQueryPhaseTests.java @@ -24,10 +24,11 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -53,8 +54,8 @@ private static DfsSearchResult newSearchResult(int shardIndex, long requestId, S public void testDfsWith2Shards() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0, null))); - results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0, null))); + results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new ShardId("test", "na", 
0), null, OriginalIndices.NONE))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -65,16 +66,16 @@ public void testDfsWith2Shards() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.id() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, - null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(2); // the size of the result set listener.onResponse(queryResult); } else if (request.id() == 2) { - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node2", new Index("test", "na"), 0, - null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node2", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); @@ -113,8 +114,8 @@ public void run() throws IOException { public void testDfsWith1ShardFailed() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0, null))); - results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0, null))); + results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -125,8 +126,8 @@ public void testDfsWith1ShardFailed() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.id() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, - null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs( new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); @@ -170,8 +171,8 @@ public void run() throws IOException { public void testFailPhaseOnException() throws IOException { AtomicArray results = new AtomicArray<>(2); AtomicReference> responseRef = new AtomicReference<>(); - results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new Index("test", "na"), 0, null))); - results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new Index("test", "na"), 0, null))); + 
results.set(0, newSearchResult(0, 1, new SearchShardTarget("node1", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); + results.set(1, newSearchResult(1, 2, new SearchShardTarget("node2", new ShardId("test", "na", 0), null, OriginalIndices.NONE))); results.get(0).termsStatistics(new Term[0], new TermStatistics[0]); results.get(1).termsStatistics(new Term[0], new TermStatistics[0]); @@ -182,8 +183,8 @@ public void testFailPhaseOnException() throws IOException { public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, SearchActionListener listener) { if (request.id() == 1) { - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, - null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); @@ -206,7 +207,7 @@ public void run() throws IOException { } }, mockSearchPhaseContext); assertEquals("dfs_query", phase.getName()); - expectThrows(UncheckedIOException.class, () -> phase.run()); + expectThrows(UncheckedIOException.class, phase::run); assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty()); // phase execution will clean up on the contexts } diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java index b00c7f0bb152e..5614476c851b8 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchSearchPhaseTests.java @@ -22,9 +22,10 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.store.MockDirectoryWrapper; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -95,14 +96,15 @@ public void testFetchTwoDocument() throws IOException { controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); - queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); + queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, 
TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -153,14 +155,15 @@ public void testFailFetchOneDoc() throws IOException { controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); - queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); + queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -215,7 +218,8 @@ public void testFetchDocsConcurrently() throws IOException, InterruptedException controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), numHits); AtomicReference responseRef = new AtomicReference<>(); for (int i = 0; i < numHits; i++) { - QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); + QuerySearchResult queryResult = new QuerySearchResult(i, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(i+1, i)}), i), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set @@ -272,14 +276,15 @@ public void testExceptionFailsPhase() throws IOException { controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = randomIntBetween(2, 10); - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); - queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); + queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); @@ -329,14 +334,15 @@ public void 
testCleanupIrrelevantContexts() throws IOException { // contexts tha controller.newSearchPhaseResults(mockSearchPhaseContext.getRequest(), 2); AtomicReference responseRef = new AtomicReference<>(); int resultSetSize = 1; - QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0, null)); + QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new ShardId("test", "na", 0), + null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(42, 1.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); // the size of the result set queryResult.setShardIndex(0); results.consumeResult(queryResult); - queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new Index("test", "na"), 1, null)); + queryResult = new QuerySearchResult(321, new SearchShardTarget("node2", new ShardId("test", "na", 1), null, OriginalIndices.NONE)); queryResult.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(84, 2.0F)}), 2.0F), new DocValueFormat[0]); queryResult.size(resultSetSize); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index e262147ef85d0..e9cde3f7aadea 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; @@ -321,7 +320,8 @@ public void testConsumer() { request.setBatchedReduceSize(bufferSize); InitialSearchPhase.ArraySearchPhaseResults consumer = searchPhaseController.newSearchPhaseResults(request, 3); assertEquals(0, reductions.size()); - QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new Index("a", "b"), 0, null)); + QuerySearchResult result = new QuerySearchResult(0, new SearchShardTarget("node", new ShardId("a", "b", 0), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Collections.singletonList(new InternalMax("test", 1.0D, DocValueFormat.RAW, @@ -330,7 +330,7 @@ public void testConsumer() { result.setShardIndex(0); consumer.consumeResult(result); - result = new QuerySearchResult(1, new SearchShardTarget("node", new Index("a", "b"), 0, null)); + result = new QuerySearchResult(1, new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); aggs = new InternalAggregations(Collections.singletonList(new InternalMax("test", 3.0D, DocValueFormat.RAW, @@ -339,7 +339,7 @@ public void testConsumer() { result.setShardIndex(2); consumer.consumeResult(result); - result = new QuerySearchResult(1, new SearchShardTarget("node", new 
Index("a", "b"), 0, null)); + result = new QuerySearchResult(1, new SearchShardTarget("node", new ShardId("a", "b", 0), null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), Float.NaN), new DocValueFormat[0]); aggs = new InternalAggregations(Collections.singletonList(new InternalMax("test", 2.0D, DocValueFormat.RAW, @@ -389,7 +389,8 @@ public void testConsumerConcurrently() throws InterruptedException { threads[i] = new Thread(() -> { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new Index("a", "b"), id, null)); + QuerySearchResult result = new QuerySearchResult(id, new SearchShardTarget("node", new ShardId("a", "b", id), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore( new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]); @@ -432,7 +433,8 @@ public void testConsumerOnlyAggs() { for (int i = 0; i < expectedNumResults; i++) { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i, null)); + QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new ShardId("a", "b", i), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number), new DocValueFormat[0]); InternalAggregations aggs = new InternalAggregations(Collections.singletonList(new InternalMax("test", (double) number, @@ -469,7 +471,8 @@ public void testConsumerOnlyHits() { for (int i = 0; i < expectedNumResults; i++) { int number = randomIntBetween(1, 1000); max.updateAndGet(prev -> Math.max(prev, number)); - QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i, null)); + QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new ShardId("a", "b", i), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] {new ScoreDoc(0, number)}), number), new DocValueFormat[0]); result.setShardIndex(i); @@ -536,7 +539,8 @@ public void testReduceTopNWithFromOffset() { searchPhaseController.newSearchPhaseResults(request, 4); int score = 100; for (int i = 0; i < 4; i++) { - QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i, null)); + QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new ShardId("a", "b", i), + null, OriginalIndices.NONE)); ScoreDoc[] docs = new ScoreDoc[3]; for (int j = 0; j < docs.length; j++) { docs[j] = new ScoreDoc(0, score--); @@ -577,7 +581,8 @@ public void testConsumerSortByField() { max.updateAndGet(prev -> Math.max(prev, number)); FieldDoc[] fieldDocs = {new FieldDoc(0, Float.NaN, new Object[]{number})}; TopDocs topDocs = new TopFieldDocs(new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields); - QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i, null)); + QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new ShardId("a", "b", i), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(topDocs, 
Float.NaN), docValueFormats); result.setShardIndex(i); result.size(size); @@ -614,7 +619,8 @@ public void testConsumerFieldCollapsing() { Object[] values = {randomFrom(collapseValues)}; FieldDoc[] fieldDocs = {new FieldDoc(0, Float.NaN, values)}; TopDocs topDocs = new CollapseTopFieldDocs("field", new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields, values); - QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new Index("a", "b"), i, null)); + QuerySearchResult result = new QuerySearchResult(i, new SearchShardTarget("node", new ShardId("a", "b", i), + null, OriginalIndices.NONE)); result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats); result.setShardIndex(i); result.size(size); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java index 9fbf3704fff21..f7a8f51d564fa 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseExecutionExceptionTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.TimestampParsingException; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -28,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.InvalidIndexTemplateException; @@ -46,11 +46,11 @@ public void testToXContent() throws IOException { SearchPhaseExecutionException exception = new SearchPhaseExecutionException("test", "all shards failed", new ShardSearchFailure[]{ new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 0, null)), - new ShardSearchFailure(new IndexShardClosedException(new ShardId(new Index("foo", "_na_"), 1)), - new SearchShardTarget("node_2", new Index("foo", "_na_"), 1, null)), + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 0), null, OriginalIndices.NONE)), + new ShardSearchFailure(new IndexShardClosedException(new ShardId("foo", "_na_", 1)), + new SearchShardTarget("node_2", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)), new ShardSearchFailure(new ParsingException(5, 7, "foobar", null), - new SearchShardTarget("node_3", new Index("foo", "_na_"), 2, null)), + new SearchShardTarget("node_3", new ShardId("foo", "_na_", 2), null, OriginalIndices.NONE)), }); // Failures are grouped (by default) @@ -97,7 +97,8 @@ public void testToAndFromXContent() throws IOException { new TimestampParsingException("foo", null), new NullPointerException() ); - shardSearchFailures[i] = new ShardSearchFailure(cause, new SearchShardTarget("node_" + i, new Index("test", "_na_"), i, null)); + shardSearchFailures[i] = new ShardSearchFailure(cause, new SearchShardTarget("node_" + i, + new ShardId("test", "_na_", i), null, OriginalIndices.NONE)); } final String phase = randomFrom("query", "search", "other"); diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java 
b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java index fbc3b1975def5..b902fba04c5e7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java @@ -20,11 +20,12 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; @@ -71,7 +72,7 @@ protected void executeInitialPhase(Transport.Connection connection, InternalScro SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult = new SearchAsyncActionTests.TestSearchPhaseResult(internalRequest.id(), connection.getNode()); testSearchPhaseResult.setSearchShardTarget(new SearchShardTarget(connection.getNode().getId(), - new Index("test", "_na_"), 1, null)); + new ShardId("test", "_na_", 1), null, OriginalIndices.NONE)); searchActionListener.onResponse(testSearchPhaseResult); }).start(); } @@ -162,7 +163,7 @@ protected void executeInitialPhase(Transport.Connection connection, InternalScro SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult = new SearchAsyncActionTests.TestSearchPhaseResult(internalRequest.id(), connection.getNode()); testSearchPhaseResult.setSearchShardTarget(new SearchShardTarget(connection.getNode().getId(), - new Index("test", "_na_"), 1, null)); + new ShardId("test", "_na_", 1), null, OriginalIndices.NONE)); searchActionListener.onResponse(testSearchPhaseResult); }).start(); } @@ -235,7 +236,7 @@ protected void executeInitialPhase(Transport.Connection connection, InternalScro SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult = new SearchAsyncActionTests.TestSearchPhaseResult(internalRequest.id(), connection.getNode()); testSearchPhaseResult.setSearchShardTarget(new SearchShardTarget(connection.getNode().getId(), - new Index("test", "_na_"), 1, null)); + new ShardId("test", "_na_", 1), null, OriginalIndices.NONE)); searchActionListener.onResponse(testSearchPhaseResult); }).start(); } @@ -312,7 +313,7 @@ protected void executeInitialPhase(Transport.Connection connection, InternalScro SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult = new SearchAsyncActionTests.TestSearchPhaseResult(internalRequest.id(), connection.getNode()); testSearchPhaseResult.setSearchShardTarget(new SearchShardTarget(connection.getNode().getId(), - new Index("test", "_na_"), 1, null)); + new ShardId("test", "_na_", 1), null, OriginalIndices.NONE)); searchActionListener.onResponse(testSearchPhaseResult); } }).start(); diff --git a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 29a7944f58792..3beb6fb8fdacf 100644 --- a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceAlreadyExistsException; import 
org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.ParsingException; @@ -32,7 +33,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; @@ -150,9 +151,9 @@ public void testConvert() throws IOException { RestRequest request = new FakeRestRequest(); RestChannel channel = new DetailedExceptionRestChannel(request); ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 1, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 1), null, OriginalIndices.NONE)); ShardSearchFailure failure1 = new ShardSearchFailure(new ParsingException(1, 2, "foobar", null), - new SearchShardTarget("node_1", new Index("foo", "_na_"), 2, null)); + new SearchShardTarget("node_1", new ShardId("foo", "_na_", 2), null, OriginalIndices.NONE)); SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1}); BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index 5009594160ef7..63415d981e61d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -20,6 +20,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -33,7 +34,7 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchShardTarget; @@ -87,7 +88,8 @@ public int numberOfShards() { @Override public SearchShardTarget shardTarget() { - return new SearchShardTarget("no node, this is a unit test", new Index("no index, this is a unit test", "_na_"), 0, null); + return new SearchShardTarget("no node, this is a unit test", new ShardId("no index, this is a unit test", "_na_", 0), + null, OriginalIndices.NONE); } } diff --git a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java index 1448590feb327..a9948b8b64856 100644 --- 
a/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/MockSearchServiceTests.java @@ -21,13 +21,14 @@ import org.apache.lucene.search.Query; import org.elasticsearch.Version; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; @@ -45,7 +46,7 @@ public void testAssertNoInFlightContext() { @Override public SearchShardTarget shardTarget() { - return new SearchShardTarget("node", new Index("idx", "ignored"), 0, null); + return new SearchShardTarget("node", new ShardId("idx", "ignored", 0), null, OriginalIndices.NONE); } @Override diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 03751637a79a5..ab9fff7dbe587 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -8,6 +8,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -181,7 +182,7 @@ void stopExecutor() { String id = String.valueOf(i); SearchHit hit = new SearchHit(1, id, new Text("watch"), Collections.emptyMap()); hit.version(1L); - hit.shard(new SearchShardTarget("nodeId", watchIndex, 0, "whatever")); + hit.shard(new SearchShardTarget("nodeId", new ShardId(watchIndex, 0), "whatever", OriginalIndices.NONE)); hits[i] = hit; boolean active = randomBoolean(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java index 5c525e10a1919..8824af4a8762a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -6,11 +6,12 @@ package org.elasticsearch.xpack.watcher.condition; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; @@ -77,7 +78,7 @@ public void 
testExecuteAccessHits() throws Exception { Clock.systemUTC()); SearchHit hit = new SearchHit(0, "1", new Text("type"), null); hit.score(1f); - hit.shard(new SearchShardTarget("a", new Index("a", "indexUUID"), 0, null)); + hit.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null, OriginalIndices.NONE)); InternalSearchResponse internalSearchResponse = new InternalSearchResponse( new SearchHits(new SearchHit[]{hit}, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f), diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 11656d838a076..e8e0e16b6ea4f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkAction; @@ -216,7 +217,7 @@ public void testFindTriggeredWatchesGoodCase() { BytesArray source = new BytesArray("{}"); SearchHit hit = new SearchHit(0, "first_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); hit.version(1L); - hit.shard(new SearchShardTarget("_node_id", index, 0, null)); + hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null, OriginalIndices.NONE)); hit.sourceRef(source); SearchHits hits = new SearchHits(new SearchHit[]{hit}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); when(searchResponse1.getHits()).thenReturn(hits); @@ -230,7 +231,7 @@ public void testFindTriggeredWatchesGoodCase() { // First return a scroll response with a single hit and then with no hits hit = new SearchHit(0, "second_foo", new Text(TriggeredWatchStoreField.DOC_TYPE), null); hit.version(1L); - hit.shard(new SearchShardTarget("_node_id", index, 0, null)); + hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null, OriginalIndices.NONE)); hit.sourceRef(source); hits = new SearchHits(new SearchHit[]{hit}, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); SearchResponse searchResponse2 = new SearchResponse( From 7f1784e9f9a3fe370a398875c07f03cd34133671 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 29 Jan 2019 15:08:11 +0100 Subject: [PATCH 53/57] Remove Dead MockTransport Code (#34044) * All these methods are unused --- .../test/gateway/NoopGatewayAllocator.java | 50 ------------------- .../test/transport/MockTransportService.java | 47 ----------------- .../test/transport/StubbableTransport.java | 6 --- 3 files changed, 103 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java deleted file mode 100644 index 9966bfb47fa47..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test.gateway; - -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.FailedShard; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.gateway.GatewayAllocator; - -import java.util.List; - -/** - * An allocator used for tests that doesn't do anything - */ -public class NoopGatewayAllocator extends GatewayAllocator { - - public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); - - @Override - public void applyStartedShards(RoutingAllocation allocation, List startedShards) { - // noop - } - - @Override - public void applyFailedShards(RoutingAllocation allocation, List failedShards) { - // noop - } - - @Override - public void allocateUnassigned(RoutingAllocation allocation) { - // noop - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 403ac96104a10..1ff1fa37fc120 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -237,13 +237,6 @@ public void addFailToSendNoConnectRule(TransportService transportService, final addFailToSendNoConnectRule(transportService, new HashSet<>(Arrays.asList(blockedActions))); } - /** - * Adds a rule that will cause matching operations to throw ConnectTransportExceptions - */ - public void addFailToSendNoConnectRule(TransportAddress transportAddress, final String... blockedActions) { - addFailToSendNoConnectRule(transportAddress, new HashSet<>(Arrays.asList(blockedActions))); - } - /** * Adds a rule that will cause matching operations to throw ConnectTransportExceptions */ @@ -413,16 +406,6 @@ public boolean addSendBehavior(TransportAddress transportAddress, StubbableTrans return transport().addSendBehavior(transportAddress, sendBehavior); } - /** - * Adds a send behavior that is the default send behavior. - * - * @return {@code true} if no default send behavior was registered - */ - public boolean addSendBehavior(StubbableTransport.SendRequestBehavior behavior) { - return transport().setDefaultSendBehavior(behavior); - } - - /** * Adds a new connect behavior that is used for creating connections with the given delegate service. * @@ -445,19 +428,6 @@ public boolean addConnectBehavior(TransportAddress transportAddress, StubbableTr return transport().addConnectBehavior(transportAddress, connectBehavior); } - /** - * Adds a new get connection behavior that is used for communication with the given delegate service. 
- * - * @return {@code true} if no other get connection behavior was registered for any of the addresses bound by delegate service. - */ - public boolean addGetConnectionBehavior(TransportService transportService, StubbableConnectionManager.GetConnectionBehavior behavior) { - boolean noRegistered = true; - for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { - noRegistered &= addGetConnectionBehavior(transportAddress, behavior); - } - return noRegistered; - } - /** * Adds a get connection behavior that is used for communication with the given delegate address. * @@ -476,19 +446,6 @@ public boolean addGetConnectionBehavior(StubbableConnectionManager.GetConnection return connectionManager().setDefaultGetConnectionBehavior(behavior); } - /** - * Adds a node connected behavior that is used for the given delegate service. - * - * @return {@code true} if no other node connected behavior was registered for any of the addresses bound by delegate service. - */ - public boolean addNodeConnectedBehavior(TransportService transportService, StubbableConnectionManager.NodeConnectedBehavior behavior) { - boolean noRegistered = true; - for (TransportAddress transportAddress : extractTransportAddresses(transportService)) { - noRegistered &= addNodeConnectedBehavior(transportAddress, behavior); - } - return noRegistered; - } - /** * Adds a node connected behavior that is used for the given delegate address. * @@ -538,10 +495,6 @@ public void addTracer(Tracer tracer) { activeTracers.add(tracer); } - public boolean removeTracer(Tracer tracer) { - return activeTracers.remove(tracer); - } - public void clearTracers() { activeTracers.clear(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java index 0014e1225c595..4f0df85fd50c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/StubbableTransport.java @@ -55,12 +55,6 @@ public StubbableTransport(Transport transport) { this.delegate = transport; } - boolean setDefaultSendBehavior(SendRequestBehavior sendBehavior) { - SendRequestBehavior prior = defaultSendRequest; - defaultSendRequest = sendBehavior; - return prior == null; - } - public boolean setDefaultConnectBehavior(OpenConnectionBehavior openConnectionBehavior) { OpenConnectionBehavior prior = this.defaultConnectBehavior; this.defaultConnectBehavior = openConnectionBehavior; From 5d1964bcbf5c4dcdf9ea8e567a1b4211f47aff33 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 29 Jan 2019 15:09:40 +0100 Subject: [PATCH 54/57] Ignore shard started requests when primary term does not match (#37899) This commit changes the StartedShardEntry so that it also contains the primary term of the shard to start. This way the master node can also check that the primary term from the start request is equal to the current shard's primary term in the cluster state, and it can ignore any shard started request that concerns a previous instance of the shard that was allocated to the same node. Such situations are likely to happen with frozen (or restored) indices and the replication of closed indices, because with replicated closed indices the shards will be initialized again after the index is closed and can potentially be reinitialized again if the index is reopened as a frozen index.
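In code terms, the new master-side check boils down to the sketch below. This is a simplified excerpt of the ShardStartedClusterStateTaskExecutor change in the diff further down, not a drop-in snippet: `matched` is the shard routing resolved for the task and `builder` is the cluster tasks result builder. The lifecycle that motivates the check is spelled out next.

    // after a started-shard task has been matched to an existing routing entry
    if (matched.primary() && task.primaryTerm > 0) {
        final long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id());
        if (currentPrimaryTerm != task.primaryTerm) {
            // stale request from a previous instance of the shard: mark the task
            // successful without starting anything, as is done for missing shards
            builder.success(task);
            continue;
        }
    }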
In such cases the lifecycle of the shards would be something like: * shard is STARTED * index is closed * shards are INITIALIZING (index state is CLOSED, primary term is X) * index is reopened * shards are INITIALIZING again (index state is OPENED, potentially frozen, primary term is X+1) Adding the primary term to the shard started request will allow the master node to discard potential StartedShardEntry requests it receives if the request concerns the shard with primary term X, because the shard has been moved/reinitialized in the meantime under the primary term X+1. Relates to #33888 --- .../action/shard/ShardStateAction.java | 49 ++++-- .../cluster/IndicesClusterStateService.java | 25 ++- ...dStartedClusterStateTaskExecutorTests.java | 166 ++++++++++++------ .../action/shard/ShardStateActionTests.java | 61 ++++++- ...actIndicesClusterStateServiceTestCase.java | 9 +- .../indices/cluster/ClusterStateChanges.java | 16 +- ...ClusterStateServiceRandomUpdatesTests.java | 4 +- 7 files changed, 248 insertions(+), 82 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index e5c46cbb0ee08..4419d921a3b4a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -494,12 +494,20 @@ public int hashCode() { } } - public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) { - shardStarted(shardRouting, message, listener, clusterService.state()); + public void shardStarted(final ShardRouting shardRouting, + final long primaryTerm, + final String message, + final Listener listener) { + shardStarted(shardRouting, primaryTerm, message, listener, clusterService.state()); } - public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener, ClusterState currentState) { - StartedShardEntry shardEntry = new StartedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), message); - sendShardAction(SHARD_STARTED_ACTION_NAME, currentState, shardEntry, listener); + + public void shardStarted(final ShardRouting shardRouting, + final long primaryTerm, + final String message, + final Listener listener, + final ClusterState currentState) { + StartedShardEntry entry = new StartedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), primaryTerm, message); + sendShardAction(SHARD_STARTED_ACTION_NAME, currentState, entry, listener); } private static class ShardStartedTransportHandler implements TransportRequestHandler { @@ -544,7 +552,7 @@ public ClusterTasksResult execute(ClusterState currentState, List shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); Set seenShardRoutings = new HashSet<>(); // to prevent duplicates for (StartedShardEntry task : tasks) { - ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId); + final ShardRouting matched = currentState.getRoutingTable().getByAllocationId(task.shardId, task.allocationId); if (matched == null) { // tasks that correspond to non-existent shards are marked as successful. The reason is that we resend shard started // events on every cluster state publishing that does not contain the shard as started yet.
This means that old stale @@ -553,6 +561,19 @@ public ClusterTasksResult execute(ClusterState currentState, logger.debug("{} ignoring shard started task [{}] (shard does not exist anymore)", task.shardId, task); builder.success(task); } else { + if (matched.primary() && task.primaryTerm > 0) { + final IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex()); + assert indexMetaData != null; + final long currentPrimaryTerm = indexMetaData.primaryTerm(task.shardId.id()); + if (currentPrimaryTerm != task.primaryTerm) { + assert currentPrimaryTerm > task.primaryTerm : "received a primary term with a higher term than in the " + + "current cluster state (received [" + task.primaryTerm + "] but current is [" + currentPrimaryTerm + "])"; + logger.debug("{} ignoring shard started task [{}] (primary term {} does not match current term {})", + task.shardId, task, task.primaryTerm, currentPrimaryTerm); + builder.success(task); + continue; + } + } if (matched.initializing() == false) { assert matched.active() : "expected active shard routing for task " + task + " but found " + matched; // same as above, this might have been a stale in-flight request, so we just ignore. @@ -597,6 +618,7 @@ public void onFailure(String source, Exception e) { public static class StartedShardEntry extends TransportRequest { final ShardId shardId; final String allocationId; + final long primaryTerm; final String message; StartedShardEntry(StreamInput in) throws IOException { @@ -604,8 +626,12 @@ public static class StartedShardEntry extends TransportRequest { shardId = ShardId.readShardId(in); allocationId = in.readString(); if (in.getVersion().before(Version.V_6_3_0)) { - final long primaryTerm = in.readVLong(); + primaryTerm = in.readVLong(); assert primaryTerm == UNASSIGNED_PRIMARY_TERM : "shard is only started by itself: primary term [" + primaryTerm + "]"; + } else if (in.getVersion().onOrAfter(Version.V_7_0_0)) { // TODO update version to 6.7.0 after backport + primaryTerm = in.readVLong(); + } else { + primaryTerm = UNASSIGNED_PRIMARY_TERM; } this.message = in.readString(); if (in.getVersion().before(Version.V_6_3_0)) { @@ -614,9 +640,10 @@ public static class StartedShardEntry extends TransportRequest { } } - public StartedShardEntry(ShardId shardId, String allocationId, String message) { + public StartedShardEntry(final ShardId shardId, final String allocationId, final long primaryTerm, final String message) { this.shardId = shardId; this.allocationId = allocationId; + this.primaryTerm = primaryTerm; this.message = message; } @@ -627,6 +654,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(allocationId); if (out.getVersion().before(Version.V_6_3_0)) { out.writeVLong(0L); + } else if (out.getVersion().onOrAfter(Version.V_7_0_0)) { // TODO update version to 6.7.0 after backport + out.writeVLong(primaryTerm); } out.writeString(message); if (out.getVersion().before(Version.V_6_3_0)) { @@ -636,8 +665,8 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String toString() { - return String.format(Locale.ROOT, "StartedShardEntry{shardId [%s], allocationId [%s], message [%s]}", - shardId, allocationId, message); + return String.format(Locale.ROOT, "StartedShardEntry{shardId [%s], allocationId [%s], primary term [%d], message [%s]}", + shardId, allocationId, primaryTerm, message); } } diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java 
b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 80ac05ece8274..5955a749fea34 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -575,13 +575,14 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR } try { - logger.debug("{} creating shard", shardRouting.shardId()); + final long primaryTerm = state.metaData().index(shardRouting.index()).primaryTerm(shardRouting.id()); + logger.debug("{} creating shard with primary term [{}]", shardRouting.shardId(), primaryTerm); RecoveryState recoveryState = new RecoveryState(shardRouting, nodes.getLocalNode(), sourceNode); indicesService.createShard( shardRouting, recoveryState, recoveryTargetService, - new RecoveryListener(shardRouting), + new RecoveryListener(shardRouting, primaryTerm), repositoriesService, failedShardHandler, globalCheckpointSyncer, @@ -598,9 +599,10 @@ private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, Shard "local shard has a different allocation id but wasn't cleaning by removeShards. " + "cluster state: " + shardRouting + " local: " + currentRoutingEntry; + final long primaryTerm; try { final IndexMetaData indexMetaData = clusterState.metaData().index(shard.shardId().getIndex()); - final long primaryTerm = indexMetaData.primaryTerm(shard.shardId().id()); + primaryTerm = indexMetaData.primaryTerm(shard.shardId().id()); final Set inSyncIds = indexMetaData.inSyncAllocationIds(shard.shardId().id()); final IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId()); final Set pre60AllocationIds = indexShardRoutingTable.assignedShards() @@ -633,7 +635,7 @@ private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, Shard shardRouting.shardId(), state, nodes.getMasterNode()); } if (nodes.getMasterNode() != null) { - shardStateAction.shardStarted(shardRouting, "master " + nodes.getMasterNode() + + shardStateAction.shardStarted(shardRouting, primaryTerm, "master " + nodes.getMasterNode() + " marked shard as initializing, but shard state is [" + state + "], mark shard as started", SHARD_STATE_ACTION_LISTENER, clusterState); } @@ -673,15 +675,24 @@ private static DiscoveryNode findSourceNodeForPeerRecovery(Logger logger, Routin private class RecoveryListener implements PeerRecoveryTargetService.RecoveryListener { + /** + * ShardRouting with which the shard was created + */ private final ShardRouting shardRouting; - private RecoveryListener(ShardRouting shardRouting) { + /** + * Primary term with which the shard was created + */ + private final long primaryTerm; + + private RecoveryListener(final ShardRouting shardRouting, final long primaryTerm) { this.shardRouting = shardRouting; + this.primaryTerm = primaryTerm; } @Override - public void onRecoveryDone(RecoveryState state) { - shardStateAction.shardStarted(shardRouting, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); + public void onRecoveryDone(final RecoveryState state) { + shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java index 1d3a523cdc94f..20b7548004f4a 
100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStartedClusterStateTaskExecutorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ESAllocationTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction.StartedShardEntry; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; @@ -34,7 +35,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; -import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -64,19 +64,19 @@ public void setUp() throws Exception { public void testEmptyTaskListProducesSameClusterState() throws Exception { final ClusterState clusterState = stateWithNoShard(); - assertTasksExecution(clusterState, Collections.emptyList(), result -> assertSame(clusterState, result.resultingState)); + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, Collections.emptyList()); + assertSame(clusterState, result.resultingState); } public void testNonExistentIndexMarkedAsSuccessful() throws Exception { final ClusterState clusterState = stateWithNoShard(); - final StartedShardEntry entry = new StartedShardEntry(new ShardId("test", "_na", 0), "aId", "test"); - assertTasksExecution(clusterState, singletonList(entry), - result -> { - assertSame(clusterState, result.resultingState); - assertThat(result.executionResults.size(), equalTo(1)); - assertThat(result.executionResults.containsKey(entry), is(true)); - assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(entry)).isSuccess(), is(true)); - }); + final StartedShardEntry entry = new StartedShardEntry(new ShardId("test", "_na", 0), "aId", randomNonNegativeLong(), "test"); + + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, singletonList(entry)); + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(1)); + assertThat(result.executionResults.containsKey(entry), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(entry)).isSuccess(), is(true)); } public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { @@ -87,20 +87,19 @@ public void testNonExistentShardsAreMarkedAsSuccessful() throws Exception { final List tasks = Stream.concat( // Existent shard id but different allocation id IntStream.range(0, randomIntBetween(1, 5)) - .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), 0), String.valueOf(i), "allocation id")), + .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), 0), String.valueOf(i), 0L, "allocation id")), // Non existent shard id IntStream.range(1, randomIntBetween(2, 5)) - .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), i), String.valueOf(i), "shard id")) + .mapToObj(i -> new StartedShardEntry(new ShardId(indexMetaData.getIndex(), i), String.valueOf(i), 0L, "shard id")) ).collect(Collectors.toList()); - assertTasksExecution(clusterState, tasks, result -> { - assertSame(clusterState, result.resultingState); - assertThat(result.executionResults.size(), 
equalTo(tasks.size())); - tasks.forEach(task -> { - assertThat(result.executionResults.containsKey(task), is(true)); - assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); - }); + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, tasks); + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); }); } @@ -119,16 +118,16 @@ public void testNonInitializingShardAreMarkedAsSuccessful() throws Exception { } else { allocationId = shardRoutingTable.replicaShards().iterator().next().allocationId().getId(); } - return new StartedShardEntry(shardId, allocationId, "test"); + final long primaryTerm = indexMetaData.primaryTerm(shardId.id()); + return new StartedShardEntry(shardId, allocationId, primaryTerm, "test"); }).collect(Collectors.toList()); - assertTasksExecution(clusterState, tasks, result -> { - assertSame(clusterState, result.resultingState); - assertThat(result.executionResults.size(), equalTo(tasks.size())); - tasks.forEach(task -> { - assertThat(result.executionResults.containsKey(task), is(true)); - assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); - }); + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, tasks); + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); }); } @@ -138,26 +137,26 @@ public void testStartedShards() throws Exception { final IndexMetaData indexMetaData = clusterState.metaData().index(indexName); final ShardId shardId = new ShardId(indexMetaData.getIndex(), 0); + final long primaryTerm = indexMetaData.primaryTerm(shardId.id()); final ShardRouting primaryShard = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); final String primaryAllocationId = primaryShard.allocationId().getId(); final List tasks = new ArrayList<>(); - tasks.add(new StartedShardEntry(shardId, primaryAllocationId, "test")); + tasks.add(new StartedShardEntry(shardId, primaryAllocationId, primaryTerm, "test")); if (randomBoolean()) { final ShardRouting replicaShard = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().iterator().next(); final String replicaAllocationId = replicaShard.allocationId().getId(); - tasks.add(new StartedShardEntry(shardId, replicaAllocationId, "test")); + tasks.add(new StartedShardEntry(shardId, replicaAllocationId, primaryTerm, "test")); } - assertTasksExecution(clusterState, tasks, result -> { - assertNotSame(clusterState, result.resultingState); - assertThat(result.executionResults.size(), equalTo(tasks.size())); - tasks.forEach(task -> { - assertThat(result.executionResults.containsKey(task), is(true)); - assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); - - final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); - assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), 
is(ShardRoutingState.STARTED)); - }); + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, tasks); + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + + final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); }); } @@ -169,29 +168,88 @@ public void testDuplicateStartsAreOkay() throws Exception { final ShardId shardId = new ShardId(indexMetaData.getIndex(), 0); final ShardRouting shardRouting = clusterState.routingTable().shardRoutingTable(shardId).primaryShard(); final String allocationId = shardRouting.allocationId().getId(); + final long primaryTerm = indexMetaData.primaryTerm(shardId.id()); final List tasks = IntStream.range(0, randomIntBetween(2, 10)) - .mapToObj(i -> new StartedShardEntry(shardId, allocationId, "test")) + .mapToObj(i -> new StartedShardEntry(shardId, allocationId, primaryTerm, "test")) .collect(Collectors.toList()); - assertTasksExecution(clusterState, tasks, result -> { - assertNotSame(clusterState, result.resultingState); - assertThat(result.executionResults.size(), equalTo(tasks.size())); - tasks.forEach(task -> { - assertThat(result.executionResults.containsKey(task), is(true)); - assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); - - final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); - assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); - }); + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, tasks); + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(tasks.size())); + tasks.forEach(task -> { + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + + final IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); }); } - private void assertTasksExecution(final ClusterState state, - final List tasks, - final Consumer consumer) throws Exception { + public void testPrimaryTermsMismatch() throws Exception { + final String indexName = "test"; + final int shard = 0; + final int primaryTerm = 2 + randomInt(200); + + ClusterState clusterState = state(indexName, randomBoolean(), ShardRoutingState.INITIALIZING, ShardRoutingState.INITIALIZING); + clusterState = ClusterState.builder(clusterState) + .metaData(MetaData.builder(clusterState.metaData()) + .put(IndexMetaData.builder(clusterState.metaData().index(indexName)) + .primaryTerm(shard, primaryTerm) + .build(), true) + .build()) + .build(); + final ShardId shardId = new ShardId(clusterState.metaData().index(indexName).getIndex(), shard); + final String primaryAllocationId = clusterState.routingTable().shardRoutingTable(shardId).primaryShard().allocationId().getId(); + { + final StartedShardEntry 
task = + new StartedShardEntry(shardId, primaryAllocationId, primaryTerm - 1, "primary terms does not match on primary"); + + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, singletonList(task)); + assertSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(1)); + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.INITIALIZING)); + assertSame(clusterState, result.resultingState); + } + { + final StartedShardEntry task = + new StartedShardEntry(shardId, primaryAllocationId, primaryTerm, "primary terms match on primary"); + + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, singletonList(task)); + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(1)); + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); + assertNotSame(clusterState, result.resultingState); + clusterState = result.resultingState; + } + { + final long replicaPrimaryTerm = randomBoolean() ? primaryTerm : primaryTerm - 1; + final String replicaAllocationId = clusterState.routingTable().shardRoutingTable(shardId).replicaShards().iterator().next() + .allocationId().getId(); + + final StartedShardEntry task = new StartedShardEntry(shardId, replicaAllocationId, replicaPrimaryTerm, "test on replica"); + + final ClusterStateTaskExecutor.ClusterTasksResult result = executeTasks(clusterState, singletonList(task)); + assertNotSame(clusterState, result.resultingState); + assertThat(result.executionResults.size(), equalTo(1)); + assertThat(result.executionResults.containsKey(task), is(true)); + assertThat(((ClusterStateTaskExecutor.TaskResult) result.executionResults.get(task)).isSuccess(), is(true)); + IndexShardRoutingTable shardRoutingTable = result.resultingState.routingTable().shardRoutingTable(task.shardId); + assertThat(shardRoutingTable.getByAllocationId(task.allocationId).state(), is(ShardRoutingState.STARTED)); + assertNotSame(clusterState, result.resultingState); + } + } + + private ClusterStateTaskExecutor.ClusterTasksResult executeTasks(final ClusterState state, + final List tasks) throws Exception { final ClusterStateTaskExecutor.ClusterTasksResult result = executor.execute(state, tasks); assertThat(result, notNullValue()); - consumer.accept(result); + return result; } } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index e94a974ae7a89..a800c0c79929c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -72,12 +72,14 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import 
static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.elasticsearch.test.VersionUtils.randomCompatibleVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class ShardStateActionTests extends ESTestCase { @@ -420,8 +422,9 @@ public void testShardStarted() throws InterruptedException { setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); final ShardRouting shardRouting = getRandomShardRouting(index); + final long primaryTerm = clusterService.state().metaData().index(shardRouting.index()).primaryTerm(shardRouting.id()); final TestListener listener = new TestListener(); - shardStateAction.shardStarted(shardRouting, "testShardStarted", listener); + shardStateAction.shardStarted(shardRouting, primaryTerm, "testShardStarted", listener); final CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests[0].request, instanceOf(ShardStateAction.StartedShardEntry.class)); @@ -429,6 +432,7 @@ public void testShardStarted() throws InterruptedException { ShardStateAction.StartedShardEntry entry = (ShardStateAction.StartedShardEntry) capturedRequests[0].request; assertThat(entry.shardId, equalTo(shardRouting.shardId())); assertThat(entry.allocationId, equalTo(shardRouting.allocationId().getId())); + assertThat(entry.primaryTerm, equalTo(primaryTerm)); transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); listener.await(); @@ -481,7 +485,7 @@ public void testShardEntryBWCSerialize() throws Exception { final ShardId shardId = new ShardId(randomRealisticUnicodeOfLengthBetween(10, 100), UUID.randomUUID().toString(), between(0, 1000)); final String allocationId = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); final String reason = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); - try (StreamInput in = serialize(new StartedShardEntry(shardId, allocationId, reason), bwcVersion).streamInput()) { + try (StreamInput in = serialize(new StartedShardEntry(shardId, allocationId, 0L, reason), bwcVersion).streamInput()) { in.setVersion(bwcVersion); final FailedShardEntry failedShardEntry = new FailedShardEntry(in); assertThat(failedShardEntry.shardId, equalTo(shardId)); @@ -490,8 +494,7 @@ public void testShardEntryBWCSerialize() throws Exception { assertThat(failedShardEntry.failure, nullValue()); assertThat(failedShardEntry.markAsStale, equalTo(true)); } - try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, - reason, null, false), bwcVersion).streamInput()) { + try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, reason, null, false), bwcVersion).streamInput()) { in.setVersion(bwcVersion); final StartedShardEntry startedShardEntry = new StartedShardEntry(in); assertThat(startedShardEntry.shardId, equalTo(shardId)); @@ -500,6 +503,56 @@ public void testShardEntryBWCSerialize() throws Exception { } } + public void testFailedShardEntrySerialization() throws Exception { + final ShardId shardId = new ShardId(randomRealisticUnicodeOfLengthBetween(10, 100), UUID.randomUUID().toString(), between(0, 1000)); + final String 
allocationId = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + final long primaryTerm = randomIntBetween(0, 100); + final String message = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + final Exception failure = randomBoolean() ? null : getSimulatedFailure(); + final boolean markAsStale = randomBoolean(); + + final Version version = randomFrom(randomCompatibleVersion(random(), Version.CURRENT)); + final FailedShardEntry failedShardEntry = new FailedShardEntry(shardId, allocationId, primaryTerm, message, failure, markAsStale); + try (StreamInput in = serialize(failedShardEntry, version).streamInput()) { + in.setVersion(version); + final FailedShardEntry deserialized = new FailedShardEntry(in); + assertThat(deserialized.shardId, equalTo(shardId)); + assertThat(deserialized.allocationId, equalTo(allocationId)); + assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); + assertThat(deserialized.message, equalTo(message)); + if (failure != null) { + assertThat(deserialized.failure, notNullValue()); + assertThat(deserialized.failure.getClass(), equalTo(failure.getClass())); + assertThat(deserialized.failure.getMessage(), equalTo(failure.getMessage())); + } else { + assertThat(deserialized.failure, nullValue()); + } + assertThat(deserialized.markAsStale, equalTo(markAsStale)); + assertEquals(failedShardEntry, deserialized); + } + } + + public void testStartedShardEntrySerialization() throws Exception { + final ShardId shardId = new ShardId(randomRealisticUnicodeOfLengthBetween(10, 100), UUID.randomUUID().toString(), between(0, 1000)); + final String allocationId = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + final long primaryTerm = randomIntBetween(0, 100); + final String message = randomRealisticUnicodeOfCodepointLengthBetween(10, 100); + + final Version version = randomFrom(randomCompatibleVersion(random(), Version.CURRENT)); + try (StreamInput in = serialize(new StartedShardEntry(shardId, allocationId, primaryTerm, message), version).streamInput()) { + in.setVersion(version); + final StartedShardEntry deserialized = new StartedShardEntry(in); + assertThat(deserialized.shardId, equalTo(shardId)); + assertThat(deserialized.allocationId, equalTo(allocationId)); + if (version.onOrAfter(Version.V_7_0_0)) { // TODO update version to 6.7.0 after backport + assertThat(deserialized.primaryTerm, equalTo(primaryTerm)); + } else { + assertThat(deserialized.primaryTerm, equalTo(0L)); + } + assertThat(deserialized.message, equalTo(message)); + } + } + BytesReference serialize(Writeable writeable, Version version) throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { out.setVersion(version); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index f248d46b11744..9b6cae43081ad 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -361,11 +361,14 @@ public void updateShardState(ShardRouting shardRouting, assertThat(this.shardId(), equalTo(shardRouting.shardId())); assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting)); if (this.shardRouting.active()) { - assertTrue("and active shard must stay active, current: " + this.shardRouting + ", got: " + 
shardRouting, + assertTrue("an active shard must stay active, current: " + this.shardRouting + ", got: " + shardRouting, shardRouting.active()); } if (this.shardRouting.primary()) { assertTrue("a primary shard can't be demoted", shardRouting.primary()); + if (this.shardRouting.initializing()) { + assertEquals("primary term can not be updated on an initializing primary shard: " + shardRouting, term, newPrimaryTerm); + } } else if (shardRouting.primary()) { // note: it's ok for a replica in post recovery to be started and promoted at once // this can happen when the primary failed after we sent the start shard message @@ -390,6 +393,10 @@ public IndexShardState state() { return null; } + public long term() { + return term; + } + public void updateTerm(long newTerm) { assertThat("term can only be incremented: " + shardRouting, newTerm, greaterThanOrEqualTo(term)); if (shardRouting.primary() && shardRouting.active()) { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 8a00be28f5eb2..c1e32be9d29af 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -277,10 +277,18 @@ public ClusterState applyFailedShards(ClusterState clusterState, List startedShards) { - List entries = startedShards.stream().map(startedShard -> - new StartedShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), "shard started")) - .collect(Collectors.toList()); - return runTasks(shardStartedClusterStateTaskExecutor, clusterState, entries); + final Map entries = startedShards.stream() + .collect(Collectors.toMap(Function.identity(), startedShard -> { + final IndexMetaData indexMetaData = clusterState.metaData().index(startedShard.shardId().getIndex()); + return indexMetaData != null ? 
indexMetaData.primaryTerm(startedShard.shardId().id()) : 0L; + })); + return applyStartedShards(clusterState, entries); + } + + public ClusterState applyStartedShards(ClusterState clusterState, Map startedShards) { + return runTasks(shardStartedClusterStateTaskExecutor, clusterState, startedShards.entrySet().stream() + .map(e -> new StartedShardEntry(e.getKey().shardId(), e.getKey().allocationId().getId(), e.getValue(), "shard started")) + .collect(Collectors.toList())); } private ClusterState runTasks(ClusterStateTaskExecutor executor, ClusterState clusterState, List entries) { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index b400b56b34d55..e664cc87452fc 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -384,7 +384,7 @@ public ClusterState randomlyUpdateClusterState(ClusterState state, } // randomly start and fail allocated shards - List startedShards = new ArrayList<>(); + final Map startedShards = new HashMap<>(); List failedShards = new ArrayList<>(); for (DiscoveryNode node : state.nodes()) { IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); @@ -393,7 +393,7 @@ public ClusterState randomlyUpdateClusterState(ClusterState state, for (MockIndexShard indexShard : indexService) { ShardRouting persistedShardRouting = indexShard.routingEntry(); if (persistedShardRouting.initializing() && randomBoolean()) { - startedShards.put(persistedShardRouting, indexShard.term()); } else if (rarely()) { failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception(), randomBoolean())); } From 65a9b61a9162d4a9f5181b164ce4d23ee343b56e Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Tue, 29 Jan 2019 09:18:05 -0500 Subject: [PATCH 55/57] Add Seq# based optimistic concurrency control to UpdateRequest (#37872) The update request has lesser-known support for a one-off update of a known document version. This PR adds a seq#-based alternative to power these operations.
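As a usage sketch of the new conditional update (the `client` variable, index name, id, and field values below are illustrative assumptions, and imports are omitted; setIfSeqNo and setIfPrimaryTerm are the methods this diff adds to UpdateRequest):

    // index a document and remember the seq# and primary term assigned to it
    IndexResponse indexResponse = client.index(
        new IndexRequest("index").id("1").source("field", "v1"), RequestOptions.DEFAULT);
    long seqNo = indexResponse.getSeqNo();
    long primaryTerm = indexResponse.getPrimaryTerm();

    // only apply the update if the document has not been changed since that write
    UpdateRequest updateRequest = new UpdateRequest("index", "1");
    updateRequest.doc(Collections.singletonMap("field", "v2"), XContentType.JSON);
    updateRequest.setIfSeqNo(seqNo);
    updateRequest.setIfPrimaryTerm(primaryTerm);
    // a seq#/primary term mismatch fails with a version_conflict_engine_exception (409)
    UpdateResponse updateResponse = client.update(updateRequest, RequestOptions.DEFAULT);

The same condition is exposed over REST as the if_seq_no and if_primary_term request parameters (see the update.json spec and the YAML test added below).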
Relates #36148 Relates #10708 --- .../java/org/elasticsearch/client/CrudIT.java | 56 +++++++++---- docs/reference/docs/delete.asciidoc | 2 +- docs/reference/docs/index_.asciidoc | 2 +- docs/reference/docs/update.asciidoc | 8 ++ .../resources/rest-api-spec/api/update.json | 8 ++ .../test/update/35_if_seq_no.yml | 64 +++++++++++++++ .../elasticsearch/action/DocWriteRequest.java | 64 +++++++++++++++ .../action/bulk/BulkRequest.java | 1 + .../action/delete/DeleteRequest.java | 21 +---- .../action/index/IndexRequest.java | 21 +---- .../action/update/UpdateHelper.java | 4 +- .../action/update/UpdateRequest.java | 82 +++++++++++++++++++ .../action/update/UpdateRequestBuilder.java | 24 ++++++ .../elasticsearch/index/engine/Engine.java | 30 +++++++ .../index/engine/InternalEngine.java | 6 ++ .../index/get/ShardGetService.java | 24 +++--- .../action/document/RestUpdateAction.java | 2 + .../index/engine/InternalEngineTests.java | 32 ++++++++ .../index/shard/ShardGetServiceTests.java | 34 ++++++-- .../org/elasticsearch/update/UpdateIT.java | 42 ++++++++++ 20 files changed, 450 insertions(+), 77 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index eddb6b5758452..1bf1f2487cd29 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -68,9 +68,9 @@ import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestDeleteAction; import org.elasticsearch.rest.action.document.RestGetAction; +import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.rest.action.document.RestMultiGetAction; import org.elasticsearch.rest.action.document.RestUpdateAction; -import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -90,8 +90,10 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThan; @@ -606,22 +608,46 @@ public void testUpdate() throws IOException { IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); - UpdateRequest updateRequest = new UpdateRequest("index", "id"); - updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); - assertEquals(RestStatus.OK, updateResponse.status()); - assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion()); - - UpdateRequest updateRequestConflict = new UpdateRequest("index", "id"); - updateRequestConflict.doc(singletonMap("field", "with_version_conflict"), randomFrom(XContentType.values())); - updateRequestConflict.version(indexResponse.getVersion()); + long lastUpdateSeqNo; + long 
lastUpdatePrimaryTerm; + { + UpdateRequest updateRequest = new UpdateRequest("index", "id"); + updateRequest.doc(singletonMap("field", "updated"), randomFrom(XContentType.values())); + final UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(indexResponse.getVersion() + 1, updateResponse.getVersion()); + lastUpdateSeqNo = updateResponse.getSeqNo(); + lastUpdatePrimaryTerm = updateResponse.getPrimaryTerm(); + assertThat(lastUpdateSeqNo, greaterThanOrEqualTo(0L)); + assertThat(lastUpdatePrimaryTerm, greaterThanOrEqualTo(1L)); + } - ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> - execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync)); - assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][id]: version conflict, " + - "current version [2] is different than the one provided [1]]", exception.getMessage()); + { + final UpdateRequest updateRequest = new UpdateRequest("index", "id"); + updateRequest.doc(singletonMap("field", "with_seq_no_conflict"), randomFrom(XContentType.values())); + if (randomBoolean()) { + updateRequest.setIfSeqNo(lastUpdateSeqNo + 1); + updateRequest.setIfPrimaryTerm(lastUpdatePrimaryTerm); + } else { + updateRequest.setIfSeqNo(lastUpdateSeqNo + (randomBoolean() ? 0 : 1)); + updateRequest.setIfPrimaryTerm(lastUpdatePrimaryTerm + 1); + } + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync)); + assertEquals(exception.toString(),RestStatus.CONFLICT, exception.status()); + assertThat(exception.getMessage(), containsString("Elasticsearch exception [type=version_conflict_engine_exception")); + } + { + final UpdateRequest updateRequest = new UpdateRequest("index", "id"); + updateRequest.doc(singletonMap("field", "with_seq_no"), randomFrom(XContentType.values())); + updateRequest.setIfSeqNo(lastUpdateSeqNo); + updateRequest.setIfPrimaryTerm(lastUpdatePrimaryTerm); + final UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + assertEquals(RestStatus.OK, updateResponse.status()); + assertEquals(lastUpdateSeqNo + 1, updateResponse.getSeqNo()); + assertEquals(lastUpdatePrimaryTerm, updateResponse.getPrimaryTerm()); + } } { IndexRequest indexRequest = new IndexRequest("index").id("with_script"); diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index bc6f7b840048d..22301b98f1031 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -39,7 +39,7 @@ The result of the above delete operation is: [[optimistic-concurrency-control-delete]] === Optimistic concurrency control -Delete operations can be made optional and only be performed if the last +Delete operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. 
If a mismatch is detected, the operation will result in a `VersionConflictException` diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index c7ca42bfaf4c4..257b88289d87a 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -185,7 +185,7 @@ The result of the above index operation is: [[optimistic-concurrency-control-index]] === Optimistic concurrency control -Index operations can be made optional and only be performed if the last +Index operations can be made conditional and only be performed if the last modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 1cfc122bee402..42840b1b0a5ec 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -349,3 +349,11 @@ version numbers being out of sync with the external system. Use the <> instead. ===================================================== + +`if_seq_no` and `if_primary_term`:: + +Update operations can be made conditional and only be performed if the last +modification to the document was assigned the sequence number and primary +term specified by the `if_seq_no` and `if_primary_term` parameters. If a +mismatch is detected, the operation will result in a `VersionConflictException` +and a status code of 409. See <> for more details. \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json index 37710cf07a10b..92f1013a317c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/update.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update.json @@ -63,6 +63,14 @@ "type": "time", "description": "Explicit operation timeout" }, + "if_seq_no" : { + "type" : "number", + "description" : "only perform the update operation if the last operation that has changed the document has the specified sequence number" + }, + "if_primary_term" : { + "type" : "number", + "description" : "only perform the update operation if the last operation that has changed the document has the specified primary term" + }, "version": { "type": "number", "description": "Explicit version number for concurrency control" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml new file mode 100644 index 0000000000000..dbc569104cf4c --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/35_if_seq_no.yml @@ -0,0 +1,64 @@ +--- +"Update with if_seq_no": + + - skip: + version: " - 6.99.99" + reason: if_seq_no was added in 7.0 + + - do: + catch: missing + update: + index: test_1 + id: 1 + if_seq_no: 1 + if_primary_term: 1 + body: + doc: { foo: baz } + + - do: + index: + index: test_1 + id: 1 + body: + foo: baz + + - do: + catch: conflict + update: + index: test_1 + id: 1 + if_seq_no: 234 + if_primary_term: 1 + body: + doc: { foo: baz } + + - do: + update: + index: test_1 + id: 1 + if_seq_no: 0 + if_primary_term: 1 + body: + doc: { foo: bar } + + - do: + get: + index: test_1 + id: 1 + + - match: { _source: { foo: bar } } + + - do: + bulk: + body: + - update: + _index: test_1 + _id: 1 + if_seq_no: 100 + if_primary_term: 200 + - doc: + foo: baz + + - 
match: { errors: true } + - match: { items.0.update.status: 409 } + diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index fdf62e951a517..d8a9a3503a617 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -24,11 +24,16 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.index.VersionType; import java.io.IOException; import java.util.Locale; +import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + /** * Generic interface to group ActionRequest, which perform writes to a single document * Action requests implementing this can be part of {@link org.elasticsearch.action.bulk.BulkRequest} @@ -117,6 +122,39 @@ public interface DocWriteRequest extends IndicesRequest { */ T versionType(VersionType versionType); + /** + * only perform this request if the document's last modification was assigned the given + * sequence number. Must be used in combination with {@link #setIfPrimaryTerm(long)} + * + * If the document's last modification was assigned a different sequence number a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + T setIfSeqNo(long seqNo); + + /** + * only perform this request if the document's last modification was assigned the given + * primary term. Must be used in combination with {@link #setIfSeqNo(long)} + * + * If the document's last modification was assigned a different term a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + T setIfPrimaryTerm(long term); + + /** + * If set, only perform this request if the document's last modification was assigned this sequence number. + * If the document's last modification was assigned a different sequence number a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + long ifSeqNo(); + + /** + * If set, only perform this request if the document's last modification was assigned this primary term. + * + * If the document's last modification was assigned a different term a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
+ */ + long ifPrimaryTerm(); + /** * Get the requested document operation type of the request * @return the operation type {@link OpType} @@ -216,4 +254,30 @@ static void writeDocumentRequest(StreamOutput out, DocWriteRequest request) throw new IllegalStateException("invalid request [" + request.getClass().getSimpleName() + " ]"); } } + + static ActionRequestValidationException validateSeqNoBasedCASParams( + DocWriteRequest request, ActionRequestValidationException validationException) { + if (request.versionType().validateVersionForWrites(request.version()) == false) { + validationException = addValidationError("illegal version value [" + request.version() + "] for version type [" + + request.versionType().name() + "]", validationException); + } + if (request.versionType() == VersionType.FORCE) { + validationException = addValidationError("version type [force] may no longer be used", validationException); + } + + if (request.ifSeqNo() != UNASSIGNED_SEQ_NO && ( + request.versionType() != VersionType.INTERNAL || request.version() != Versions.MATCH_ANY + )) { + validationException = addValidationError("compare and write operations can not use versioning", validationException); + } + if (request.ifPrimaryTerm() == UNASSIGNED_PRIMARY_TERM && request.ifSeqNo() != UNASSIGNED_SEQ_NO) { + validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException); + } + if (request.ifPrimaryTerm() != UNASSIGNED_PRIMARY_TERM && request.ifSeqNo() == UNASSIGNED_SEQ_NO) { + validationException = + addValidationError("ifSeqNo is unassigned, but primary term is [" + request.ifPrimaryTerm() + "]", validationException); + } + + return validationException; + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index e58b0dfbffbd3..b5c786ab2df6d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -503,6 +503,7 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null } else if ("update".equals(action)) { UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).retryOnConflict(retryOnConflict) .version(version).versionType(versionType) + .setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm) .routing(routing); // EMPTY is safe here because we never call namedObject try (InputStream dataStream = sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType).streamInput(); diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 8d2967fd28ba4..a033bf3cb000f 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -110,27 +110,8 @@ public ActionRequestValidationException validate() { if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } - if (versionType.validateVersionForWrites(version) == false) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" - + versionType.name() + "]", validationException); - } - if (versionType == VersionType.FORCE) { - validationException = addValidationError("version type [force] may no longer be used", validationException); - } - - if (ifSeqNo != UNASSIGNED_SEQ_NO && ( - 
versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY - )) { - validationException = addValidationError("compare and write operations can not use versioning", validationException); - } - if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) { - validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException); - } - if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) { - validationException = - addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException); - } + validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); return validationException; } diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index a9aac3025de1e..37d960831776d 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -188,14 +188,7 @@ public ActionRequestValidationException validate() { addValidationError("an id is required for a " + opType() + " operation", validationException); } - if (!versionType.validateVersionForWrites(resolvedVersion)) { - validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" - + versionType.name() + "]", validationException); - } - - if (versionType == VersionType.FORCE) { - validationException = addValidationError("version type [force] may no longer be used", validationException); - } + validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); if (id != null && id.getBytes(StandardCharsets.UTF_8).length > 512) { validationException = addValidationError("id is too long, must be no longer than 512 bytes but was: " + @@ -210,18 +203,6 @@ public ActionRequestValidationException validate() { validationException = addValidationError("pipeline cannot be an empty string", validationException); } - if (ifSeqNo != UNASSIGNED_SEQ_NO && ( - versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY - )) { - validationException = addValidationError("compare and write operations can not use versioning", validationException); - } - if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) { - validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException); - } - if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) { - validationException = - addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException); - } return validationException; } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index a8a5fb8f72f30..8cd6146768fff 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -69,8 +69,8 @@ public UpdateHelper(ScriptService scriptService) { * Prepares an update request by converting it into an index or delete request or an update response (no action). 
*/ public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { - final GetResult getResult = indexShard.getService().getForUpdate(request.type(), request.id(), request.version(), - request.versionType()); + final GetResult getResult = indexShard.getService().getForUpdate( + request.type(), request.id(), request.version(), request.versionType(), request.ifSeqNo(), request.ifPrimaryTerm()); return prepare(indexShard.shardId(), request, getResult, nowInMillis); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index a7805b4cbdbad..2a1865aa80818 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -54,6 +54,8 @@ import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class UpdateRequest extends InstanceShardOperationRequest implements DocWriteRequest, WriteRequest, ToXContentObject { @@ -66,6 +68,8 @@ public class UpdateRequest extends InstanceShardOperationRequest private static final ParseField DOC_AS_UPSERT_FIELD = new ParseField("doc_as_upsert"); private static final ParseField DETECT_NOOP_FIELD = new ParseField("detect_noop"); private static final ParseField SOURCE_FIELD = new ParseField("_source"); + private static final ParseField IF_SEQ_NO = new ParseField("if_seq_no"); + private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); static { PARSER = new ObjectParser<>(UpdateRequest.class.getSimpleName()); @@ -89,6 +93,8 @@ public class UpdateRequest extends InstanceShardOperationRequest PARSER.declareField(UpdateRequest::fetchSource, (parser, context) -> FetchSourceContext.fromXContent(parser), SOURCE_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_BOOLEAN_OR_STRING); + PARSER.declareLong(UpdateRequest::setIfSeqNo, IF_SEQ_NO); + PARSER.declareLong(UpdateRequest::setIfPrimaryTerm, IF_PRIMARY_TERM); } // Set to null initially so we can know to override in bulk requests that have a default type. 
@@ -105,6 +111,9 @@ public class UpdateRequest extends InstanceShardOperationRequest private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; private int retryOnConflict = 0; + private long ifSeqNo = UNASSIGNED_SEQ_NO; + private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM; + private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; @@ -170,6 +179,16 @@ public ActionRequestValidationException validate() { } } + validationException = DocWriteRequest.validateSeqNoBasedCASParams(this, validationException); + + if (ifSeqNo != UNASSIGNED_SEQ_NO && retryOnConflict > 0) { + validationException = addValidationError("compare and write operations can not be retried", validationException); + } + + if (ifSeqNo != UNASSIGNED_SEQ_NO && docAsUpsert) { + validationException = addValidationError("compare and write operations can not be used with upsert", validationException); + } + if (script == null && doc == null) { validationException = addValidationError("script or doc is missing", validationException); } @@ -531,6 +550,55 @@ public VersionType versionType() { return this.versionType; } + /** + * only perform this update request if the document's last modification was assigned the given + * sequence number. Must be used in combination with {@link #setIfPrimaryTerm(long)} + * + * If the document's last modification was assigned a different sequence number a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + public UpdateRequest setIfSeqNo(long seqNo) { + if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) { + throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "]."); + } + ifSeqNo = seqNo; + return this; + } + + /** + * only perform this update request if the document's last modification was assigned the given + * primary term. Must be used in combination with {@link #setIfSeqNo(long)} + * + * If the document's last modification was assigned a different term a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + public UpdateRequest setIfPrimaryTerm(long term) { + if (term < 0) { + throw new IllegalArgumentException("primary term must be non negative. got [" + term + "]"); + } + ifPrimaryTerm = term; + return this; + } + + /** + * If set, only perform this update request if the document's last modification was assigned this sequence number. + * If the document's last modification was assigned a different sequence number a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + public long ifSeqNo() { + return ifSeqNo; + } + + /** + * If set, only perform this update request if the document's last modification was assigned this primary term. + * + * If the document's last modification was assigned a different term a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
+ */ + public long ifPrimaryTerm() { + return ifPrimaryTerm; + } + @Override public OpType opType() { return OpType.UPDATE; @@ -811,6 +879,10 @@ public void readFrom(StreamInput in) throws IOException { docAsUpsert = in.readBoolean(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + ifSeqNo = in.readZLong(); + ifPrimaryTerm = in.readVLong(); + } detectNoop = in.readBoolean(); scriptedUpsert = in.readBoolean(); } @@ -862,6 +934,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(docAsUpsert); out.writeLong(version); out.writeByte(versionType.getValue()); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeZLong(ifSeqNo); + out.writeVLong(ifPrimaryTerm); + } out.writeBoolean(detectNoop); out.writeBoolean(scriptedUpsert); } @@ -880,6 +956,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.copyCurrentStructure(parser); } } + + if (ifSeqNo != UNASSIGNED_SEQ_NO) { + builder.field(IF_SEQ_NO.getPreferredName(), ifSeqNo); + builder.field(IF_PRIMARY_TERM.getPreferredName(), ifPrimaryTerm); + } + if (script != null) { builder.field("script", script); } diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 181dba6a10734..919f460e8c07b 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -150,6 +150,30 @@ public UpdateRequestBuilder setVersionType(VersionType versionType) { return this; } + /** + * only perform this update request if the document's last modification was assigned the given + * sequence number. Must be used in combination with {@link #setIfPrimaryTerm(long)} + * + * If the document's last modification was assigned a different sequence number a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + public UpdateRequestBuilder setIfSeqNo(long seqNo) { + request.setIfSeqNo(seqNo); + return this; + } + + /** + * only perform this update request if the document's last modification was assigned the given + * primary term. Must be used in combination with {@link #setIfSeqNo(long)} + * + * If the document's last modification was assigned a different term a + * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown. + */ + public UpdateRequestBuilder setIfPrimaryTerm(long term) { + request.setIfPrimaryTerm(term); + return this; + } + /** * Sets the number of shard copies that must be active before proceeding with the write. * See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
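The builder exposes the same pair of setters. A hedged sketch of transport-client-style usage (again not part of the patch; it mirrors the UpdateIT test later in this series, with the index/type/id and the seq#/primary-term values as placeholders):

    import java.io.IOException;
    import org.elasticsearch.action.update.UpdateResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.xcontent.XContentFactory;

    // CAS-style update through the builder API. Per the validation added to
    // UpdateRequest#validate() above, if_seq_no/if_primary_term must be set as a
    // pair and can not be combined with versioning, retryOnConflict() or docAsUpsert().
    static UpdateResponse casUpdate(Client client, long seqNo, long primaryTerm) throws IOException {
        return client.prepareUpdate("index", "type1", "1")
            .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject())
            .setIfSeqNo(seqNo)
            .setIfPrimaryTerm(primaryTerm)
            .get();
    }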
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index e1df104d338df..e450e93e9d397 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -72,6 +72,7 @@ import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; @@ -622,6 +623,13 @@ protected final GetResult getFromSearcher(Get get, BiFunction search throw new VersionConflictEngineException(shardId, get.type(), get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } + if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( + get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term + )) { + throw new VersionConflictEngineException(shardId, get.type(), get.id(), + get.getIfSeqNo(), get.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); + } if (get.isReadFromTranslog()) { // this is only used for updates - API _GET calls will always read from a reader for consistency // the update call doesn't need the consistency since it's source only + _parent but parent can go away in 7.0 diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 6d58b981ddc53..9fb1cb804946f 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -56,6 +55,9 @@ import java.util.Map; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + public final class ShardGetService extends AbstractIndexShardComponent { private final MapperService mapperService; private final MeanMetric existsMetric = new MeanMetric(); @@ -77,15 +79,17 @@ public GetStats stats() { public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { - return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false); + return + get(type, id, gFields, realtime, version, versionType, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, fetchSourceContext, false); } private GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, - FetchSourceContext fetchSourceContext, boolean readFromTranslog) { + long ifSeqNo, long ifPrimaryTerm, FetchSourceContext fetchSourceContext, boolean readFromTranslog) { currentMetric.inc(); try { long now = System.nanoTime(); - GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType,
fetchSourceContext, readFromTranslog); + GetResult getResult = + innerGet(type, id, gFields, realtime, version, versionType, ifSeqNo, ifPrimaryTerm, fetchSourceContext, readFromTranslog); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); @@ -98,8 +102,8 @@ private GetResult get(String type, String id, String[] gFields, boolean realtime } } - public GetResult getForUpdate(String type, String id, long version, VersionType versionType) { - return get(type, id, new String[]{RoutingFieldMapper.NAME}, true, version, versionType, + public GetResult getForUpdate(String type, String id, long version, VersionType versionType, long ifSeqNo, long ifPrimaryTerm) { + return get(type, id, new String[]{RoutingFieldMapper.NAME}, true, version, versionType, ifSeqNo, ifPrimaryTerm, FetchSourceContext.FETCH_SOURCE, true); } @@ -113,7 +117,7 @@ public GetResult getForUpdate(String type, String id, long version, VersionType public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) { if (!engineGetResult.exists()) { - return new GetResult(shardId.getIndexName(), type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); } currentMetric.inc(); @@ -151,7 +155,7 @@ private FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceCont } private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, - FetchSourceContext fetchSourceContext, boolean readFromTranslog) { + long ifSeqNo, long ifPrimaryTerm, FetchSourceContext fetchSourceContext, boolean readFromTranslog) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); if (type == null || type.equals("_all")) { DocumentMapper mapper = mapperService.documentMapper(); @@ -162,14 +166,14 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea if (type != null) { Term uidTerm = new Term(IdFieldMapper.NAME, Uid.encodeId(id)); get = indexShard.get(new Engine.Get(realtime, readFromTranslog, type, id, uidTerm) - .version(version).versionType(versionType)); + .version(version).versionType(versionType).setIfSeqNo(ifSeqNo).setIfPrimaryTerm(ifPrimaryTerm)); if (get.exists() == false) { get.close(); } } if (get == null || get.exists() == false) { - return new GetResult(shardId.getIndexName(), type, id, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, -1, false, null, null); + return new GetResult(shardId.getIndexName(), type, id, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM, -1, false, null, null); } try { diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java index 033176c4a7300..463a18ea6b802 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestUpdateAction.java @@ -86,6 +86,8 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateRequest.version(RestActions.parseVersion(request)); updateRequest.versionType(VersionType.fromString(request.param("version_type"), updateRequest.versionType())); + updateRequest.setIfSeqNo(request.paramAsLong("if_seq_no", updateRequest.ifSeqNo())); + updateRequest.setIfPrimaryTerm(request.paramAsLong("if_primary_term", 
updateRequest.ifPrimaryTerm())); request.applyContentParser(parser -> { updateRequest.fromXContent(parser); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index edf1925fdd798..f57bff72fc57c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1378,6 +1378,38 @@ public void testVersionedUpdate() throws IOException { } + public void testGetIfSeqNoIfPrimaryTerm() throws IOException { + final BiFunction searcherFactory = engine::acquireSearcher; + + ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); + Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); + Engine.IndexResult indexResult = engine.index(create); + if (randomBoolean()) { + engine.refresh("test"); + } + if (randomBoolean()) { + engine.flush(); + } + try (Engine.GetResult get = engine.get( + new Engine.Get(true, true, doc.type(), doc.id(), create.uid()) + .setIfSeqNo(indexResult.getSeqNo()).setIfPrimaryTerm(primaryTerm.get()), + searcherFactory)) { + assertEquals(indexResult.getSeqNo(), get.docIdAndVersion().seqNo); + } + + expectThrows(VersionConflictEngineException.class, () -> engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()) + .setIfSeqNo(indexResult.getSeqNo() + 1).setIfPrimaryTerm(primaryTerm.get()), + searcherFactory)); + + expectThrows(VersionConflictEngineException.class, () -> engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()) + .setIfSeqNo(indexResult.getSeqNo()).setIfPrimaryTerm(primaryTerm.get() + 1), + searcherFactory)); + + expectThrows(VersionConflictEngineException.class, () -> engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()) + .setIfSeqNo(indexResult.getSeqNo() + 1).setIfPrimaryTerm(primaryTerm.get() + 1), + searcherFactory)); + } + public void testVersioningNewIndex() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); Engine.Index index = indexForDoc(doc); diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index 7db904f89dfa8..14e513ff89cfe 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -20,17 +20,21 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.mapper.RoutingFieldMapper; import java.io.IOException; import java.nio.charset.StandardCharsets; +import static org.elasticsearch.common.lucene.uid.Versions.MATCH_ANY; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + public class ShardGetServiceTests extends IndexShardTestCase { public void testGetForUpdate() throws IOException { @@ -47,7 
+51,8 @@ public void testGetForUpdate() throws IOException { recoverShardFromStore(primary); Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + GetResult testGet = primary.getService().getForUpdate( + "test", "0", test.getVersion(), VersionType.INTERNAL, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { @@ -56,7 +61,8 @@ public void testGetForUpdate() throws IOException { Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); - GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + GetResult testGet1 = primary.getService().getForUpdate( + "test", "1", test1.getVersion(), VersionType.INTERNAL, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); @@ -69,13 +75,22 @@ public void testGetForUpdate() throws IOException { } // now again from the reader - test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); + Engine.IndexResult test2 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar"); assertTrue(primary.getEngine().refreshNeeded()); - testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + testGet1 = primary.getService().getForUpdate("test", "1", test2.getVersion(), VersionType.INTERNAL, + UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + final long primaryTerm = primary.operationPrimaryTerm; + testGet1 = primary.getService().getForUpdate("test", "1", MATCH_ANY, VersionType.INTERNAL, test2.getSeqNo(), primaryTerm); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + + expectThrows(VersionConflictEngineException.class, () -> + primary.getService().getForUpdate("test", "1", MATCH_ANY, VersionType.INTERNAL, test2.getSeqNo() + 1, primaryTerm)); + expectThrows(VersionConflictEngineException.class, () -> + primary.getService().getForUpdate("test", "1", MATCH_ANY, VersionType.INTERNAL, test2.getSeqNo(), primaryTerm + 1)); closeShards(primary); } @@ -93,13 +108,16 @@ public void testTypelessGetForUpdate() throws IOException { Engine.IndexResult indexResult = indexDoc(shard, "some_type", "0", "{\"foo\" : \"bar\"}"); assertTrue(indexResult.isCreated()); - GetResult getResult = shard.getService().getForUpdate("some_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + GetResult getResult = shard.getService().getForUpdate( + "some_type", "0", MATCH_ANY, VersionType.INTERNAL, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertTrue(getResult.isExists()); - 
getResult = shard.getService().getForUpdate("some_other_type", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + getResult = shard.getService().getForUpdate( + "some_other_type", "0", MATCH_ANY, VersionType.INTERNAL, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertFalse(getResult.isExists()); - getResult = shard.getService().getForUpdate("_doc", "0", Versions.MATCH_ANY, VersionType.INTERNAL); + getResult = shard.getService().getForUpdate( + "_doc", "0", MATCH_ANY, VersionType.INTERNAL, UNASSIGNED_SEQ_NO, UNASSIGNED_PRIMARY_TERM); assertTrue(getResult.isExists()); closeShards(shard); diff --git a/server/src/test/java/org/elasticsearch/update/UpdateIT.java b/server/src/test/java/org/elasticsearch/update/UpdateIT.java index 05b27758ee434..7652c503450ae 100644 --- a/server/src/test/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.action.update.UpdateResponse; @@ -36,7 +37,9 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.engine.DocumentMissingException; +import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -411,6 +414,45 @@ public void testUpdate() throws Exception { } } + public void testUpdateWithIfSeqNo() throws Exception { + createTestIndex(); + ensureGreen(); + + IndexResponse result = client().prepareIndex("test", "type1", "1").setSource("field", 1).get(); + expectThrows(VersionConflictEngineException.class, () -> + client().prepareUpdate(indexOrAlias(), "type1", "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo() + 1) + .setIfPrimaryTerm(result.getPrimaryTerm()) + .get() + ); + + expectThrows(VersionConflictEngineException.class, () -> + client().prepareUpdate(indexOrAlias(), "type1", "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo()) + .setIfPrimaryTerm(result.getPrimaryTerm() + 1) + .get() + ); + + expectThrows(VersionConflictEngineException.class, () -> + client().prepareUpdate(indexOrAlias(), "type1", "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo() + 1) + .setIfPrimaryTerm(result.getPrimaryTerm() + 1) + .get() + ); + + UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo()) + .setIfPrimaryTerm(result.getPrimaryTerm()) + .get(); + + assertThat(updateResponse.status(), equalTo(RestStatus.OK)); + assertThat(updateResponse.getSeqNo(), equalTo(result.getSeqNo() + 1)); + } + public void testUpdateRequestWithBothScriptAndDoc() throws Exception { createTestIndex(); ensureGreen(); From 09a11a34effe7e9f81157560f5936b2790c50201 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Tue, 29 
Jan 2019 15:31:49 +0100 Subject: [PATCH 56/57] Remove clusterAlias instance member from QueryShardContext (#37923) The clusterAlias member is only used in the copy constructor, to be able to reconstruct the fully qualified index. It is also possible to remove the instance member and add a private constructor that accepts the already built Index object which contains the cluster alias. --- .../index/query/QueryShardContext.java | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index c398fde04a2f6..2b5415115895d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -86,7 +86,6 @@ public class QueryShardContext extends QueryRewriteContext { private final BiFunction> indexFieldDataService; private final int shardId; private final IndexReader reader; - private final String clusterAlias; private String[] types = Strings.EMPTY_ARRAY; private boolean cacheable = true; private final SetOnce frozen = new SetOnce<>(); @@ -110,6 +109,23 @@ public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterC SimilarityService similarityService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, NamedWriteableRegistry namedWriteableRegistry, Client client, IndexReader reader, LongSupplier nowInMillis, String clusterAlias) { + this(shardId, indexSettings, bitsetFilterCache, indexFieldDataLookup, mapperService, similarityService, scriptService, + xContentRegistry, namedWriteableRegistry, client, reader, nowInMillis, new Index(RemoteClusterAware.buildRemoteIndexName( + clusterAlias, indexSettings.getIndex().getName()), indexSettings.getIndex().getUUID())); + } + + public QueryShardContext(QueryShardContext source) { + this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, + source.similarityService, source.scriptService, source.getXContentRegistry(), source.getWriteableRegistry(), + source.client, source.reader, source.nowInMillis, source.fullyQualifiedIndex); + this.types = source.getTypes(); + } + + private QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, + BiFunction> indexFieldDataLookup, MapperService mapperService, + SimilarityService similarityService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, + NamedWriteableRegistry namedWriteableRegistry, Client client, IndexReader reader, LongSupplier nowInMillis, + Index fullyQualifiedIndex) { super(xContentRegistry, namedWriteableRegistry,client, nowInMillis); this.shardId = shardId; this.similarityService = similarityService; @@ -121,16 +137,7 @@ public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterC this.scriptService = scriptService; this.indexSettings = indexSettings; this.reader = reader; - this.clusterAlias = clusterAlias; - this.fullyQualifiedIndex = new Index(RemoteClusterAware.buildRemoteIndexName(clusterAlias, indexSettings.getIndex().getName()), - indexSettings.getIndex().getUUID()); - } - - public QueryShardContext(QueryShardContext source) { - this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, - source.similarityService, source.scriptService, source.getXContentRegistry(), 
source.getWriteableRegistry(), - source.client, source.reader, source.nowInMillis, source.clusterAlias); - this.types = source.getTypes(); + this.fullyQualifiedIndex = fullyQualifiedIndex; } private void reset() { From 3c9f7031b92dab442305eb9bbc1df48e5d4aa6da Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 29 Jan 2019 15:41:05 +0100 Subject: [PATCH 57/57] Enforce cluster UUIDs (#37775) This commit adds join validation around cluster UUIDs, preventing a node from joining a cluster if it was previously part of another cluster. The commit introduces a new flag to the cluster state, clusterUUIDCommitted, which denotes whether the node has locked itself into a cluster with the given uuid. When a cluster is committed, this flag is set to true, and subsequent cluster state updates preserve that information. Note that coordinating-only nodes are still free to switch clusters at will (after restart), as they don't carry any persistent state. --- .../coordination/CoordinationState.java | 26 ++++- .../cluster/coordination/Coordinator.java | 16 ++- .../cluster/coordination/JoinHelper.java | 10 +- .../cluster/metadata/MetaData.java | 55 ++++++++-- .../coordination/CoordinatorTests.java | 101 ++++++++++++++++-- .../cluster/coordination/JoinHelperTests.java | 2 +- .../cluster/metadata/MetaDataTests.java | 37 +++++++ .../discovery/ClusterDisruptionIT.java | 28 +++++ .../gateway/ClusterStateUpdatersTests.java | 6 +- .../GatewayMetaStatePersistedStateTests.java | 10 +- 10 files changed, 261 insertions(+), 30 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java index 4d542566ccd70..dff6b5add0b09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationState.java @@ -422,7 +422,7 @@ public void handleCommit(ApplyCommitRequest applyCommit) { logger.trace("handleCommit: applying commit request for term [{}] and version [{}]", applyCommit.getTerm(), applyCommit.getVersion()); - persistedState.markLastAcceptedConfigAsCommitted(); + persistedState.markLastAcceptedStateAsCommitted(); assert getLastCommittedConfiguration().equals(getLastAcceptedConfiguration()); } @@ -471,16 +471,32 @@ public interface PersistedState { /** * Marks the last accepted cluster state as committed. * After a successful call to this method, {@link #getLastAcceptedState()} should return the last cluster state that was set, - * with the last committed configuration now corresponding to the last accepted configuration. + * with the last committed configuration now corresponding to the last accepted configuration, and the cluster uuid, if set, + * marked as committed.
*/ - default void markLastAcceptedConfigAsCommitted() { + default void markLastAcceptedStateAsCommitted() { final ClusterState lastAcceptedState = getLastAcceptedState(); + MetaData.Builder metaDataBuilder = null; if (lastAcceptedState.getLastAcceptedConfiguration().equals(lastAcceptedState.getLastCommittedConfiguration()) == false) { final CoordinationMetaData coordinationMetaData = CoordinationMetaData.builder(lastAcceptedState.coordinationMetaData()) .lastCommittedConfiguration(lastAcceptedState.getLastAcceptedConfiguration()) .build(); - final MetaData metaData = MetaData.builder(lastAcceptedState.metaData()).coordinationMetaData(coordinationMetaData).build(); - setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaData).build()); + metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); + metaDataBuilder.coordinationMetaData(coordinationMetaData); + } + // if we receive a commit from a Zen1 master that has not recovered its state yet, the cluster uuid might not be known yet. + assert lastAcceptedState.metaData().clusterUUID().equals(MetaData.UNKNOWN_CLUSTER_UUID) == false || + lastAcceptedState.term() == ZEN1_BWC_TERM : + "received cluster state with empty cluster uuid but not Zen1 BWC term: " + lastAcceptedState; + if (lastAcceptedState.metaData().clusterUUID().equals(MetaData.UNKNOWN_CLUSTER_UUID) == false && + lastAcceptedState.metaData().clusterUUIDCommitted() == false) { + if (metaDataBuilder == null) { + metaDataBuilder = MetaData.builder(lastAcceptedState.metaData()); + } + metaDataBuilder.clusterUUIDCommitted(true); + } + if (metaDataBuilder != null) { + setLastAcceptedState(ClusterState.builder(lastAcceptedState).metaData(metaDataBuilder).build()); } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 4bf977f8398ce..dff9cdcb8a2a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -147,7 +147,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.masterService = masterService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); this.joinHelper = new JoinHelper(settings, allocationService, masterService, transportService, - this::getCurrentTerm, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators); + this::getCurrentTerm, this::getStateForMasterService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators); this.persistedStateSupplier = persistedStateSupplier; this.discoverySettings = new DiscoverySettings(settings, clusterSettings); this.lastKnownLeader = Optional.empty(); @@ -281,7 +281,18 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { + lastKnownLeader + ", rejecting"); } - if (publishRequest.getAcceptedState().term() > coordinationState.get().getLastAcceptedState().term()) { + final ClusterState localState = coordinationState.get().getLastAcceptedState(); + + if (localState.metaData().clusterUUIDCommitted() && + localState.metaData().clusterUUID().equals(publishRequest.getAcceptedState().metaData().clusterUUID()) == false) { + logger.warn("received cluster state from {} with a different cluster uuid {} than local cluster uuid {}, rejecting", + sourceNode, publishRequest.getAcceptedState().metaData().clusterUUID(), localState.metaData().clusterUUID()); +
throw new CoordinationStateRejectedException("received cluster state from " + sourceNode + + " with a different cluster uuid " + publishRequest.getAcceptedState().metaData().clusterUUID() + + " than local cluster uuid " + localState.metaData().clusterUUID() + ", rejecting"); + } + + if (publishRequest.getAcceptedState().term() > localState.term()) { // only do join validation if we have not accepted state from this master yet onJoinValidators.forEach(a -> a.accept(getLocalNode(), publishRequest.getAcceptedState())); } @@ -653,6 +664,7 @@ public void invariant() { assert followersChecker.getFastResponseState().term == getCurrentTerm() : followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() : followersChecker.getFastResponseState(); assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); + assert applierState.nodes().getMasterNodeId() == null || applierState.metaData().clusterUUIDCommitted(); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index 53fada396fcef..a9309e9fe638a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -62,6 +62,7 @@ import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.LongSupplier; +import java.util.function.Supplier; public class JoinHelper { @@ -84,7 +85,7 @@ public class JoinHelper { final Set> pendingOutgoingJoins = ConcurrentCollections.newConcurrentSet(); public JoinHelper(Settings settings, AllocationService allocationService, MasterService masterService, - TransportService transportService, LongSupplier currentTermSupplier, + TransportService transportService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, BiConsumer joinHandler, Function joinLeaderInTerm, Collection> joinValidators) { this.masterService = masterService; @@ -132,6 +133,13 @@ public ClusterTasksResult execute(ClusterState currentSta transportService.registerRequestHandler(VALIDATE_JOIN_ACTION_NAME, MembershipAction.ValidateJoinRequest::new, ThreadPool.Names.GENERIC, (request, channel, task) -> { + final ClusterState localState = currentStateSupplier.get(); + if (localState.metaData().clusterUUIDCommitted() && + localState.metaData().clusterUUID().equals(request.getState().metaData().clusterUUID()) == false) { + throw new CoordinationStateRejectedException("join validation on cluster state" + + " with a different cluster uuid " + request.getState().metaData().clusterUUID() + + " than local cluster uuid " + localState.metaData().clusterUUID() + ", rejecting"); + } joinValidators.forEach(action -> action.accept(transportService.getLocalNode(), request.getState())); channel.sendResponse(Empty.INSTANCE); }); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 3cce3f791d2b8..54c3001d9036f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -88,6 +88,7 @@ public class MetaData implements Iterable, Diffable, To private static final Logger logger = 
LogManager.getLogger(MetaData.class); public static final String ALL = "_all"; + public static final String UNKNOWN_CLUSTER_UUID = "_na_"; public enum XContentContext { /* Custom metadata should be returns as part of API call */ @@ -159,6 +160,7 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); private final String clusterUUID; + private final boolean clusterUUIDCommitted; private final long version; private final CoordinationMetaData coordinationMetaData; @@ -179,12 +181,13 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust private final SortedMap aliasAndIndexLookup; - MetaData(String clusterUUID, long version, CoordinationMetaData coordinationMetaData, + MetaData(String clusterUUID, boolean clusterUUIDCommitted, long version, CoordinationMetaData coordinationMetaData, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap indices, ImmutableOpenMap templates, ImmutableOpenMap customs, String[] allIndices, String[] allOpenIndices, String[] allClosedIndices, SortedMap aliasAndIndexLookup) { this.clusterUUID = clusterUUID; + this.clusterUUIDCommitted = clusterUUIDCommitted; this.version = version; this.coordinationMetaData = coordinationMetaData; this.transientSettings = transientSettings; @@ -218,6 +221,14 @@ public String clusterUUID() { return this.clusterUUID; } + /** + * Whether the current node with the given cluster state is locked into the cluster with the UUID returned by {@link #clusterUUID()}, + * meaning that it will not accept any cluster state with a different clusterUUID. + */ + public boolean clusterUUIDCommitted() { + return this.clusterUUIDCommitted; + } + /** * Returns the merged transient and persistent settings. 
@@ -757,6 +768,12 @@ public static boolean isGlobalStateEquals(MetaData metaData1, MetaData metaData2
         if (!metaData1.templates.equals(metaData2.templates())) {
             return false;
         }
+        if (!metaData1.clusterUUID.equals(metaData2.clusterUUID)) {
+            return false;
+        }
+        if (metaData1.clusterUUIDCommitted != metaData2.clusterUUIDCommitted) {
+            return false;
+        }
         // Check if any persistent metadata needs to be saved
         int customCount1 = 0;
         for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) {
@@ -798,6 +815,7 @@ private static class MetaDataDiff implements Diff<MetaData> {
 
         private long version;
         private String clusterUUID;
+        private boolean clusterUUIDCommitted;
         private CoordinationMetaData coordinationMetaData;
         private Settings transientSettings;
         private Settings persistentSettings;
@@ -807,6 +825,7 @@ private static class MetaDataDiff implements Diff<MetaData> {
 
         MetaDataDiff(MetaData before, MetaData after) {
             clusterUUID = after.clusterUUID;
+            clusterUUIDCommitted = after.clusterUUIDCommitted;
             version = after.version;
             coordinationMetaData = after.coordinationMetaData;
             transientSettings = after.transientSettings;
@@ -818,8 +837,11 @@ private static class MetaDataDiff implements Diff<MetaData> {
 
         MetaDataDiff(StreamInput in) throws IOException {
             clusterUUID = in.readString();
+            if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
+                clusterUUIDCommitted = in.readBoolean();
+            }
             version = in.readLong();
-            if (in.getVersion().onOrAfter(Version.V_7_0_0)) { //TODO revisit after Zen2 BWC is implemented
+            if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
                 coordinationMetaData = new CoordinationMetaData(in);
             } else {
                 coordinationMetaData = CoordinationMetaData.EMPTY_META_DATA;
@@ -836,6 +858,9 @@ private static class MetaDataDiff implements Diff<MetaData> {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(clusterUUID);
+            if (out.getVersion().onOrAfter(Version.V_7_0_0)) {
+                out.writeBoolean(clusterUUIDCommitted);
+            }
             out.writeLong(version);
             if (out.getVersion().onOrAfter(Version.V_7_0_0)) {
                 coordinationMetaData.writeTo(out);
@@ -851,6 +876,7 @@ public void writeTo(StreamOutput out) throws IOException {
         public MetaData apply(MetaData part) {
             Builder builder = builder();
             builder.clusterUUID(clusterUUID);
+            builder.clusterUUIDCommitted(clusterUUIDCommitted);
             builder.version(version);
             builder.coordinationMetaData(coordinationMetaData);
             builder.transientSettings(transientSettings);
@@ -866,6 +892,9 @@ public static MetaData readFrom(StreamInput in) throws IOException {
         Builder builder = new Builder();
         builder.version = in.readLong();
         builder.clusterUUID = in.readString();
+        if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
+            builder.clusterUUIDCommitted = in.readBoolean();
+        }
         if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
             builder.coordinationMetaData(new CoordinationMetaData(in));
         }
@@ -891,6 +920,9 @@ public static MetaData readFrom(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
         out.writeString(clusterUUID);
+        if (out.getVersion().onOrAfter(Version.V_7_0_0)) {
+            out.writeBoolean(clusterUUIDCommitted);
+        }
         if (out.getVersion().onOrAfter(Version.V_7_0_0)) {
             coordinationMetaData.writeTo(out);
         }
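
[editor's note: the hunks above gate the new boolean on V_7_0_0 in both the diff and the full-state wire formats. The pattern is worth calling out on its own; a minimal sketch, with class and field names taken from the patch but the method bodies condensed:]

    // Symmetric, version-gated wire format: whatever the writer gates on a
    // version, the reader must gate identically, or the stream desynchronizes.
    void writeTo(StreamOutput out) throws IOException {
        out.writeString(clusterUUID);
        if (out.getVersion().onOrAfter(Version.V_7_0_0)) {
            out.writeBoolean(clusterUUIDCommitted);   // older peers never see this byte
        }
    }

    void readFrom(StreamInput in) throws IOException {
        clusterUUID = in.readString();
        if (in.getVersion().onOrAfter(Version.V_7_0_0)) {
            clusterUUIDCommitted = in.readBoolean();
        }                                             // else keep the default (false)
    }
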
@@ -930,6 +962,7 @@ public static Builder builder(MetaData metaData) {
     public static class Builder {
 
         private String clusterUUID;
+        private boolean clusterUUIDCommitted;
         private long version;
 
         private CoordinationMetaData coordinationMetaData = CoordinationMetaData.EMPTY_META_DATA;
@@ -941,7 +974,7 @@ public static class Builder {
         private final ImmutableOpenMap.Builder<String, Custom> customs;
 
         public Builder() {
-            clusterUUID = "_na_";
+            clusterUUID = UNKNOWN_CLUSTER_UUID;
             indices = ImmutableOpenMap.builder();
             templates = ImmutableOpenMap.builder();
             customs = ImmutableOpenMap.builder();
@@ -950,6 +983,7 @@ public Builder() {
 
         public Builder(MetaData metaData) {
             this.clusterUUID = metaData.clusterUUID;
+            this.clusterUUIDCommitted = metaData.clusterUUIDCommitted;
             this.coordinationMetaData = metaData.coordinationMetaData;
             this.transientSettings = metaData.transientSettings;
             this.persistentSettings = metaData.persistentSettings;
@@ -1125,8 +1159,13 @@ public Builder clusterUUID(String clusterUUID) {
             return this;
         }
 
+        public Builder clusterUUIDCommitted(boolean clusterUUIDCommitted) {
+            this.clusterUUIDCommitted = clusterUUIDCommitted;
+            return this;
+        }
+
         public Builder generateClusterUuidIfNeeded() {
-            if (clusterUUID.equals("_na_")) {
+            if (clusterUUID.equals(UNKNOWN_CLUSTER_UUID)) {
                 clusterUUID = UUIDs.randomBase64UUID();
             }
             return this;
@@ -1182,8 +1221,9 @@ public MetaData build() {
             String[] allOpenIndicesArray = allOpenIndices.toArray(new String[allOpenIndices.size()]);
             String[] allClosedIndicesArray = allClosedIndices.toArray(new String[allClosedIndices.size()]);
 
-            return new MetaData(clusterUUID, version, coordinationMetaData, transientSettings, persistentSettings, indices.build(),
-                templates.build(), customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray, aliasAndIndexLookup);
+            return new MetaData(clusterUUID, clusterUUIDCommitted, version, coordinationMetaData, transientSettings, persistentSettings,
+                indices.build(), templates.build(), customs.build(), allIndicesArray, allOpenIndicesArray, allClosedIndicesArray,
+                aliasAndIndexLookup);
         }
 
         private SortedMap<String, AliasOrIndex> buildAliasAndIndexLookup() {
@@ -1226,6 +1266,7 @@ public static void toXContent(MetaData metaData, XContentBuilder builder, ToXCon
 
             builder.field("version", metaData.version());
             builder.field("cluster_uuid", metaData.clusterUUID);
+            builder.field("cluster_uuid_committed", metaData.clusterUUIDCommitted);
 
             builder.startObject("cluster_coordination");
             metaData.coordinationMetaData().toXContent(builder, params);
@@ -1324,6 +1365,8 @@ public static MetaData fromXContent(XContentParser parser) throws IOException {
                         builder.version = parser.longValue();
                     } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) {
                         builder.clusterUUID = parser.text();
+                    } else if ("cluster_uuid_committed".equals(currentFieldName)) {
+                        builder.clusterUUIDCommitted = parser.booleanValue();
                     } else {
                         throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]");
                     }
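
[editor's note: with the toXContent/fromXContent changes above, the persisted global state gains one boolean field. An abbreviated, purely illustrative rendering of the on-disk JSON (field values invented, surrounding fields elided):]

    "meta-data" : {
      "version" : 12,
      "cluster_uuid" : "PcoGbUIFTlWBgFLeMTqYVg",
      "cluster_uuid_committed" : true,
      "cluster_coordination" : { ... }
    }
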
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
index 36495914bddec..c3028de1801da 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
@@ -20,6 +20,7 @@
 
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 import org.apache.logging.log4j.CloseableThreadContext;
+import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
@@ -36,6 +37,7 @@
 import org.elasticsearch.cluster.coordination.CoordinationState.PersistedState;
 import org.elasticsearch.cluster.coordination.Coordinator.Mode;
 import org.elasticsearch.cluster.coordination.CoordinatorTests.Cluster.ClusterNode;
+import org.elasticsearch.cluster.metadata.Manifest;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNode.Role;
@@ -48,6 +50,7 @@
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -59,9 +62,11 @@
 import org.elasticsearch.discovery.zen.PublishClusterStateStats;
 import org.elasticsearch.discovery.zen.UnicastHostsProvider.HostsResolver;
 import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.gateway.MetaStateService;
 import org.elasticsearch.gateway.MockGatewayMetaState;
 import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.MockLogAppender;
 import org.elasticsearch.test.disruption.DisruptableMockTransport;
 import org.elasticsearch.test.disruption.DisruptableMockTransport.ConnectionStatus;
 import org.elasticsearch.transport.TransportService;
@@ -84,6 +89,7 @@
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -137,6 +143,13 @@ public class CoordinatorTests extends ESTestCase {
 
     private final List<NodeEnvironment> nodeEnvironments = new ArrayList<>();
 
+    private final AtomicInteger nextNodeIndex = new AtomicInteger();
+
+    @Before
+    public void resetNodeIndexBeforeEachTest() {
+        nextNodeIndex.set(0);
+    }
+
     @After
     public void closeNodeEnvironmentsAfterEachTest() {
         for (NodeEnvironment nodeEnvironment : nodeEnvironments) {
@@ -153,6 +166,7 @@ public void resetPortCounterBeforeEachTest() {
     // check that runRandomly leads to reproducible results
     public void testRepeatableTests() throws Exception {
         final Callable<Long> test = () -> {
+            resetNodeIndexBeforeEachTest();
             final Cluster cluster = new Cluster(randomIntBetween(1, 5));
             cluster.runRandomly();
             final long afterRunRandomly = value(cluster.getAnyNode().getLastAppliedClusterState());
@@ -1001,6 +1015,52 @@ public void testClusterCannotFormWithFailingJoinValidation() {
         assertTrue(cluster.clusterNodes.stream().allMatch(cn -> cn.getLastAppliedClusterState().version() == 0));
     }
 
+    public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessException {
+        final Cluster cluster1 = new Cluster(randomIntBetween(1, 3));
+        cluster1.runRandomly();
+        cluster1.stabilise();
+
+        final Cluster cluster2 = new Cluster(3);
+        cluster2.runRandomly();
+        cluster2.stabilise();
+
+        final ClusterNode shiftedNode = randomFrom(cluster2.clusterNodes).restartedNode();
+        final ClusterNode newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(),
+            shiftedNode.getLocalNode(), n -> shiftedNode.persistedState);
+        cluster1.clusterNodes.add(newNode);
+
+        MockLogAppender mockAppender = new MockLogAppender();
+        mockAppender.start();
+        mockAppender.addExpectation(
+            new MockLogAppender.SeenEventExpectation(
+                "test1",
+                JoinHelper.class.getCanonicalName(),
+                Level.INFO,
+                "*failed to join*"));
+        Logger joinLogger = LogManager.getLogger(JoinHelper.class);
+        Loggers.addAppender(joinLogger, mockAppender);
+        cluster1.runFor(DEFAULT_STABILISATION_TIME, "failing join validation");
+        try {
+            mockAppender.assertAllExpectationsMatched();
+        } finally {
+            Loggers.removeAppender(joinLogger, mockAppender);
+            mockAppender.stop();
+        }
+        assertTrue(newNode.getLastAppliedClusterState().version() == 0);
+
+        // reset clusterUUIDCommitted (and node / cluster state term) to let node join again
+        // TODO: use elasticsearch-node detach-cluster tool once it's implemented
+        final ClusterNode detachedNode = newNode.restartedNode(
+            metaData -> MetaData.builder(metaData)
+                .clusterUUIDCommitted(false)
+                .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData())
+                    .term(0L).build())
+                .build(),
+            term -> 0L);
+        cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? detachedNode : cn);
+        cluster1.stabilise();
+    }
+
     private static long defaultMillis(Setting<TimeValue> setting) {
         return setting.get(Settings.EMPTY).millis() + Cluster.DEFAULT_DELAY_VARIABILITY;
     }
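
[editor's note: the new test asserts on a log line rather than a thrown exception, because the join failure surfaces on another simulated node. The MockLogAppender pattern it uses, sketched on its own; the logger name and message pattern are the ones from the test, the code under test is elided:]

    // General shape of asserting that a specific log line was emitted.
    MockLogAppender appender = new MockLogAppender();
    appender.start();
    appender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "expected join failure", JoinHelper.class.getCanonicalName(), Level.INFO, "*failed to join*"));
    Logger logger = LogManager.getLogger(JoinHelper.class);
    Loggers.addAppender(logger, appender);
    try {
        // ... run the code expected to log the failure ...
        appender.assertAllExpectationsMatched();
    } finally {
        Loggers.removeAppender(logger, appender);  // always detach, or later tests see the appender
        appender.stop();
    }
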
@@ -1077,7 +1137,8 @@ class Cluster {
             final Set<String> masterEligibleNodeIds = new HashSet<>(initialNodeCount);
             clusterNodes = new ArrayList<>(initialNodeCount);
             for (int i = 0; i < initialNodeCount; i++) {
-                final ClusterNode clusterNode = new ClusterNode(i, allNodesMasterEligible || i == 0 || randomBoolean());
+                final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(),
+                    allNodesMasterEligible || i == 0 || randomBoolean());
                 clusterNodes.add(clusterNode);
                 if (clusterNode.getLocalNode().isMasterNode()) {
                     masterEligibleNodeIds.add(clusterNode.getId());
@@ -1108,10 +1169,9 @@ List<ClusterNode> addNodesAndStabilise(int newNodesCount) {
 
         List<ClusterNode> addNodes(int newNodesCount) {
             logger.info("--> adding {} nodes", newNodesCount);
-            final int nodeSizeAtStart = clusterNodes.size();
             final List<ClusterNode> addedNodes = new ArrayList<>();
             for (int i = 0; i < newNodesCount; i++) {
-                final ClusterNode clusterNode = new ClusterNode(nodeSizeAtStart + i, true);
+                final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true);
                 addedNodes.add(clusterNode);
             }
             clusterNodes.addAll(addedNodes);
@@ -1471,21 +1531,41 @@ class MockPersistedState implements PersistedState {
             }
         }
 
-        MockPersistedState(DiscoveryNode newLocalNode, MockPersistedState oldState) {
+        MockPersistedState(DiscoveryNode newLocalNode, MockPersistedState oldState,
+                           Function<MetaData, MetaData> adaptGlobalMetaData, Function<Long, Long> adaptCurrentTerm) {
             try {
                 if (oldState.nodeEnvironment != null) {
                     nodeEnvironment = oldState.nodeEnvironment;
+                    final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry());
+                    final MetaData updatedMetaData = adaptGlobalMetaData.apply(oldState.getLastAcceptedState().metaData());
+                    if (updatedMetaData != oldState.getLastAcceptedState().metaData()) {
+                        metaStateService.writeGlobalStateAndUpdateManifest("update global state", updatedMetaData);
+                    }
+                    final long updatedTerm = adaptCurrentTerm.apply(oldState.getCurrentTerm());
+                    if (updatedTerm != oldState.getCurrentTerm()) {
+                        final Manifest manifest = metaStateService.loadManifestOrEmpty();
+                        metaStateService.writeManifestAndCleanup("update term",
+                            new Manifest(updatedTerm, manifest.getClusterStateVersion(), manifest.getGlobalGeneration(),
+                                manifest.getIndexGenerations()));
+                    }
                     delegate = new MockGatewayMetaState(Settings.EMPTY, nodeEnvironment, xContentRegistry(), newLocalNode)
                         .getPersistedState(Settings.EMPTY, null);
                 } else {
                     nodeEnvironment = null;
                     BytesStreamOutput outStream = new BytesStreamOutput();
                     outStream.setVersion(Version.CURRENT);
-                    oldState.getLastAcceptedState().writeTo(outStream);
+                    final MetaData updatedMetaData = adaptGlobalMetaData.apply(oldState.getLastAcceptedState().metaData());
+                    final ClusterState clusterState;
+                    if (updatedMetaData != oldState.getLastAcceptedState().metaData()) {
+                        clusterState = ClusterState.builder(oldState.getLastAcceptedState()).metaData(updatedMetaData).build();
+                    } else {
+                        clusterState = oldState.getLastAcceptedState();
+                    }
+                    clusterState.writeTo(outStream);
                     StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(),
                         new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
-                    delegate = new InMemoryPersistedState(oldState.getCurrentTerm(), ClusterState.readFrom(inStream,
-                        newLocalNode)); // adapts it to new localNode instance
+                    delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(oldState.getCurrentTerm()),
+                        ClusterState.readFrom(inStream, newLocalNode)); // adapts it to new localNode instance
                 }
             } catch (IOException e) {
                 throw new UncheckedIOException("Unable to create MockPersistedState", e);
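
[editor's note: both MockPersistedState branches above use reference equality rather than equals() to decide whether an adapter changed anything and disk state needs rewriting. A short sketch of that convention; the metaData value is illustrative:]

    // Function.identity() returns its argument unchanged, so `!=` cheaply
    // distinguishes "no adaptation requested" from "caller rewrote the metadata".
    Function<MetaData, MetaData> adapt = Function.identity();
    MetaData oldMetaData = MetaData.builder().generateClusterUuidIfNeeded().build();
    MetaData newMetaData = adapt.apply(oldMetaData);
    if (newMetaData != oldMetaData) {
        // only reached for a real adapter, e.g. one clearing clusterUUIDCommitted
        // for the detached-node scenario in testCannotJoinClusterWithDifferentUUID
    }
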
@@ -1614,12 +1694,17 @@ void close() {
         }
 
         ClusterNode restartedNode() {
+            return restartedNode(Function.identity(), Function.identity());
+        }
+
+        ClusterNode restartedNode(Function<MetaData, MetaData> adaptGlobalMetaData, Function<Long, Long> adaptCurrentTerm) {
             final TransportAddress address = randomBoolean() ? buildNewFakeTransportAddress() : localNode.getAddress();
             final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(),
                 UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests
                 address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(),
                 localNode.isMasterNode() ? EnumSet.allOf(Role.class) : emptySet(), Version.CURRENT);
-            return new ClusterNode(nodeIndex, newLocalNode, node -> new MockPersistedState(newLocalNode, persistedState));
+            return new ClusterNode(nodeIndex, newLocalNode,
+                node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm));
         }
 
         private PersistedState getPersistedState() {
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
index ef843717fb469..4361660876c7a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java
@@ -43,7 +43,7 @@ public void testJoinDeduplication() {
         TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY,
             deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR,
             x -> localNode, null, Collections.emptySet());
-        JoinHelper joinHelper = new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L,
+        JoinHelper joinHelper = new JoinHelper(Settings.EMPTY, null, null, transportService, () -> 0L, () -> null,
            (joinRequest, joinCallback) -> { throw new AssertionError(); }, startJoinRequest -> { throw new AssertionError(); },
             Collections.emptyList());
         transportService.start();
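
[editor's note: this unit test never routes a validate-join request to the handler, so the null-returning supplier is never dereferenced. A hedged sketch of what real wiring might look like; the clusterService variable is hypothetical here, not taken from the patch:]

    // Hypothetical wiring: expose the node's current cluster state to JoinHelper.
    Supplier<ClusterState> currentStateSupplier = () -> clusterService.state();
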
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
index 5dcfccaea5874..685b7cca98a94 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java
@@ -411,6 +411,43 @@ public void testXContentWithIndexGraveyard() throws IOException {
         }
     }
 
+    public void testXContentClusterUUID() throws IOException {
+        final MetaData originalMeta = MetaData.builder().clusterUUID(UUIDs.randomBase64UUID())
+            .clusterUUIDCommitted(randomBoolean()).build();
+        final XContentBuilder builder = JsonXContent.contentBuilder();
+        builder.startObject();
+        originalMeta.toXContent(builder, ToXContent.EMPTY_PARAMS);
+        builder.endObject();
+        try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
+            final MetaData fromXContentMeta = MetaData.fromXContent(parser);
+            assertThat(fromXContentMeta.clusterUUID(), equalTo(originalMeta.clusterUUID()));
+            assertThat(fromXContentMeta.clusterUUIDCommitted(), equalTo(originalMeta.clusterUUIDCommitted()));
+        }
+    }
+
+    public void testSerializationClusterUUID() throws IOException {
+        final MetaData originalMeta = MetaData.builder().clusterUUID(UUIDs.randomBase64UUID())
+            .clusterUUIDCommitted(randomBoolean()).build();
+        final BytesStreamOutput out = new BytesStreamOutput();
+        originalMeta.writeTo(out);
+        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
+        final MetaData fromStreamMeta = MetaData.readFrom(
+            new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)
+        );
+        assertThat(fromStreamMeta.clusterUUID(), equalTo(originalMeta.clusterUUID()));
+        assertThat(fromStreamMeta.clusterUUIDCommitted(), equalTo(originalMeta.clusterUUIDCommitted()));
+    }
+
+    public void testMetaDataGlobalStateChangesOnClusterUUIDChanges() {
+        final MetaData metaData1 = MetaData.builder().clusterUUID(UUIDs.randomBase64UUID()).clusterUUIDCommitted(randomBoolean()).build();
+        final MetaData metaData2 = MetaData.builder(metaData1).clusterUUID(UUIDs.randomBase64UUID()).build();
+        final MetaData metaData3 = MetaData.builder(metaData1).clusterUUIDCommitted(!metaData1.clusterUUIDCommitted()).build();
+        assertFalse(MetaData.isGlobalStateEquals(metaData1, metaData2));
+        assertFalse(MetaData.isGlobalStateEquals(metaData1, metaData3));
+        final MetaData metaData4 = MetaData.builder(metaData2).clusterUUID(metaData1.clusterUUID()).build();
+        assertTrue(MetaData.isGlobalStateEquals(metaData1, metaData4));
+    }
+
     private static CoordinationMetaData.VotingConfiguration randomVotingConfig() {
         return new CoordinationMetaData.VotingConfiguration(Sets.newHashSet(generateRandomStringArray(randomInt(10), 20, false)));
     }
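
[editor's note: the last test above pins down the behavioural consequence of extending isGlobalStateEquals: two states that differ only in cluster UUID, or only in its committed flag, now count as different global state. Distilled to its essence:]

    // UUID and committed flag now participate in global-state equality.
    MetaData a = MetaData.builder().clusterUUID(UUIDs.randomBase64UUID()).build();
    MetaData b = MetaData.builder(a).clusterUUID(UUIDs.randomBase64UUID()).build();
    assert MetaData.isGlobalStateEquals(a, b) == false;
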
diff --git a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
index d94c34c7b33eb..330c73b9c02c5 100644
--- a/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/ClusterDisruptionIT.java
@@ -28,6 +28,7 @@
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.coordination.ClusterBootstrapService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -377,6 +378,33 @@ public boolean clearData(String nodeName) {
         assertTrue(client().prepareGet("index", "_doc", "1").get().isExists());
     }
 
+    public void testCannotJoinIfMasterLostDataFolder() throws Exception {
+        String masterNode = internalCluster().startMasterOnlyNode();
+        String dataNode = internalCluster().startDataOnlyNode();
+
+        internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() {
+            @Override
+            public boolean clearData(String nodeName) {
+                return true;
+            }
+
+            @Override
+            public Settings onNodeStopped(String nodeName) {
+                return Settings.builder().put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), nodeName).build();
+            }
+
+            @Override
+            public boolean validateClusterForming() {
+                return false;
+            }
+        });
+
+        assertFalse(internalCluster().client(masterNode).admin().cluster().prepareHealth().get().isTimedOut());
+        assertTrue(internalCluster().client(masterNode).admin().cluster().prepareHealth().setWaitForNodes("2").setTimeout("2s").get()
+            .isTimedOut());
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode)); // otherwise we will fail during clean-up
+    }
+
     /**
      * Tests that indices are properly deleted even if there is a master transition in between.
      * Test for https://github.com/elastic/elasticsearch/issues/11665
diff --git a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java
index b34bcf87bdbd8..cae33db90a6bc 100644
--- a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java
@@ -263,12 +263,12 @@ public void testMixCurrentAndRecoveredState() {
             .blocks(ClusterBlocks.builder().addGlobalBlock(CLUSTER_READ_ONLY_BLOCK).build())
             .metaData(metaData)
             .build();
-        assertThat(recoveredState.metaData().clusterUUID(), equalTo("_na_"));
+        assertThat(recoveredState.metaData().clusterUUID(), equalTo(MetaData.UNKNOWN_CLUSTER_UUID));
 
         final ClusterState updatedState = mixCurrentStateAndRecoveredState(currentState, recoveredState);
 
-        assertThat(updatedState.metaData().clusterUUID(), not(equalTo("_na_")));
-        assertTrue(MetaData.isGlobalStateEquals(metaData, updatedState.metaData()));
+        assertThat(updatedState.metaData().clusterUUID(), not(equalTo(MetaData.UNKNOWN_CLUSTER_UUID)));
+        assertFalse(MetaData.isGlobalStateEquals(metaData, updatedState.metaData()));
         assertThat(updatedState.metaData().index("test"), equalTo(indexMetaData));
         assertTrue(updatedState.blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK));
         assertTrue(updatedState.blocks().hasGlobalBlock(CLUSTER_READ_ONLY_BLOCK));
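
[editor's note: the pair of health calls in testCannotJoinIfMasterLostDataFolder encodes the expectation that the wiped master bootstraps a fresh one-node cluster while the data node's join is rejected, since its on-disk state is committed to the old UUID. The timeout idiom on its own, with an illustrative client variable:]

    // Waiting for a second node must time out: the join is rejected, not merely slow.
    ClusterHealthResponse health = client.admin().cluster().prepareHealth()
        .setWaitForNodes("2").setTimeout("2s").get();
    assert health.isTimedOut();
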
diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
index 921bcac3d4c65..8ccfa5e406ae2 100644
--- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
+++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java
@@ -213,22 +213,24 @@ public void testMarkAcceptedConfigAsCommitted() throws IOException {
         } while (coordinationMetaData.getLastAcceptedConfiguration().equals(coordinationMetaData.getLastCommittedConfiguration()));
 
         ClusterState state = createClusterState(randomNonNegativeLong(),
-            MetaData.builder().coordinationMetaData(coordinationMetaData).build());
+            MetaData.builder().coordinationMetaData(coordinationMetaData)
+                .clusterUUID(randomAlphaOfLength(10)).build());
         gateway.setLastAcceptedState(state);
 
         gateway = maybeNew(gateway);
         assertThat(gateway.getLastAcceptedState().getLastAcceptedConfiguration(),
             not(equalTo(gateway.getLastAcceptedState().getLastCommittedConfiguration())));
-        gateway.markLastAcceptedConfigAsCommitted();
+        gateway.markLastAcceptedStateAsCommitted();
 
         CoordinationMetaData expectedCoordinationMetaData = CoordinationMetaData.builder(coordinationMetaData)
             .lastCommittedConfiguration(coordinationMetaData.getLastAcceptedConfiguration()).build();
         ClusterState expectedClusterState =
-            ClusterState.builder(state).metaData(MetaData.builder().coordinationMetaData(expectedCoordinationMetaData).build()).build();
+            ClusterState.builder(state).metaData(MetaData.builder().coordinationMetaData(expectedCoordinationMetaData)
+                .clusterUUID(state.metaData().clusterUUID()).clusterUUIDCommitted(true).build()).build();
 
         gateway = maybeNew(gateway);
         assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState());
-        gateway.markLastAcceptedConfigAsCommitted();
+        gateway.markLastAcceptedStateAsCommitted();
 
         gateway = maybeNew(gateway);
         assertClusterStateEqual(expectedClusterState, gateway.getLastAcceptedState());
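
[editor's note: the rename from markLastAcceptedConfigAsCommitted to markLastAcceptedStateAsCommitted reflects that committing now covers more than the voting configuration. A sketch of the committed metadata implied by the expectedClusterState asserted above (inferred from the test, not the actual method body; acceptedMetaData is an illustrative variable):]

    // Committing promotes the accepted voting configuration to committed
    // and latches the cluster UUID as committed.
    MetaData committedMetaData = MetaData.builder(acceptedMetaData)
        .coordinationMetaData(CoordinationMetaData.builder(acceptedMetaData.coordinationMetaData())
            .lastCommittedConfiguration(acceptedMetaData.coordinationMetaData().getLastAcceptedConfiguration())
            .build())
        .clusterUUIDCommitted(true)
        .build();
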