diff --git a/docs/reference/ilm/actions/ilm-shrink.asciidoc b/docs/reference/ilm/actions/ilm-shrink.asciidoc
index e29b975771aac..727124a18ea43 100644
--- a/docs/reference/ilm/actions/ilm-shrink.asciidoc
+++ b/docs/reference/ilm/actions/ilm-shrink.asciidoc
@@ -17,6 +17,11 @@ stream. You cannot perform the `shrink` action on a write index.
 To use the `shrink` action in the `hot` phase, the `rollover` action *must* be
 present. If no rollover action is configured, {ilm-init} will reject the policy.
 
+The shrink action will unset the index's index.routing.allocation.total_shards_per_node
+setting, meaning that there will be no limit. This is to ensure that all shards of the
+index can be copied to a single node. This setting change will persist on the index
+even after the step completes.
+
 [IMPORTANT]
 If the shrink action is used on a <<ccr-put-follow,follower index>>, policy
 execution waits until the leader index rolls over (or is
             equalTo(e)).collect(Collectors.toList())));
         if (assertOnlyKeyInSettings) {
-            assertEquals(1, request.settings().size());
+            assertEquals(2, request.settings().size());
         }
     }
diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java
index a7ea6df5a8a64..ba51200af4187 100644
--- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java
+++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/ShrinkActionIT.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
@@ -302,4 +303,33 @@ public void testAutomaticRetryFailedShrinkAction() throws Exception {
         });
         expectThrows(ResponseException.class, () -> indexDocument(client(), index));
     }
+
+    /*
+     * This test verifies that we can still shrink an index even if the total number of shards in the index is greater than
+     * index.routing.allocation.total_shards_per_node.
+     */
+    public void testTotalShardsPerNodeTooLow() throws Exception {
+        int numShards = 4;
+        int divisor = randomFrom(2, 4);
+        int expectedFinalShards = numShards / divisor;
+        createIndexWithSettings(client(), index, alias, Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numShards)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), numShards - 2));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null));
+        updatePolicy(client(), index, policy);
+
+        String shrunkenIndexName = waitAndGetShrinkIndexName(client(), index);
+        assertBusy(() -> assertTrue(indexExists(shrunkenIndexName)), 60, TimeUnit.SECONDS);
+        assertBusy(() -> assertTrue(aliasExists(shrunkenIndexName, index)));
+        assertBusy(() -> assertThat(getStepKeyForIndex(client(), shrunkenIndexName),
+            equalTo(PhaseCompleteStep.finalStep("warm").getKey())));
+        assertBusy(() -> {
+            Map<String, Object> settings = getOnlyIndexSettings(client(), shrunkenIndexName);
+            assertThat(settings.get(SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
+            assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
+            assertThat(settings.get(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id"), nullValue());
+            assertNull(settings.get(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey()));
+        });
+        expectThrows(ResponseException.class, () -> indexDocument(client(), index));
+    }
 }
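For context, the behavior the docs hunk describes amounts to an index settings update that removes the total_shards_per_node limit while the shards are being collocated for the shrink. The sketch below is illustrative only and is not code from this PR or the ILM step implementation; the class name `ClearTotalShardsPerNodeSketch`, the helper `prepareForShrink`, and the `nodeId` parameter are hypothetical, while the setting keys and server APIs are the same ones that appear in the diff above.

```java
// Illustrative sketch (not from this PR): build a settings update that pins all shards of the
// source index to one node and nulls out index.routing.allocation.total_shards_per_node so the
// ShardsLimitAllocationDecider cannot veto the relocations needed before a shrink.
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Settings;

public class ClearTotalShardsPerNodeSketch {

    // nodeId is a hypothetical caller-supplied node to collocate the shards on.
    static UpdateSettingsRequest prepareForShrink(String indexName, String nodeId) {
        Settings settings = Settings.builder()
            // Require every shard copy of the index to live on the chosen node.
            .put("index.routing.allocation.require._id", nodeId)
            // An explicit null resets total_shards_per_node to its default (no limit).
            .putNull(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey())
            .build();
        return new UpdateSettingsRequest(indexName).settings(settings);
    }
}
```

Because the null value is written to the index metadata, the limit stays removed after the update is applied, which is consistent with the docs note that the setting change persists on the index even after the step completes.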