Remove `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting

Prior to 7.8, whenever a cluster had only a single data node, the disk watermarks were not respected. This was incompatible with
how storage-based autoscaling on ESS/ECH works, and in order to change this in 7.8+ in a backwards-compatible way, we had to
introduce the `cluster.routing.allocation.disk.watermark.enable_for_single_data_node` setting. The setting was deprecated
in elastic#73733 (7.14), and was made to accept only `true` in
elastic#73737 (8.0).
arteam committed Oct 7, 2024
1 parent 4ef5ea6 commit ea54648
Showing 7 changed files with 1 addition and 107 deletions.
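
For readers who do not want to trace the hunks below, the following self-contained sketch summarizes how the removed setting behaved in 8.x: it defaulted to `true`, emitted a deprecation warning whenever it was explicitly configured, and rejected `false` outright. This is an illustration only; it models the semantics with a plain `Map` rather than the real Elasticsearch `Setting`/`Settings` API, and the class and method names are invented for the example.

```java
import java.util.Map;

// Hypothetical stand-in for the removed ENABLE_FOR_SINGLE_DATA_NODE setting;
// it does not use the real Elasticsearch Setting API.
public class SingleDataNodeWatermarkSettingSketch {

    static final String KEY = "cluster.routing.allocation.disk.watermark.enable_for_single_data_node";

    static boolean readEnableForSingleDataNode(Map<String, String> settings) {
        String raw = settings.get(KEY);
        if (raw == null) {
            // Default: disk watermarks are enforced even on single-data-node clusters.
            return true;
        }
        // Explicitly configuring the setting triggered a deprecation warning.
        System.err.println("WARNING: setting [" + KEY + "] is deprecated and will be removed in a future version");
        if (Boolean.parseBoolean(raw) == false) {
            // Since 8.0, only `true` was accepted; `false` failed validation.
            throw new IllegalArgumentException("setting [" + KEY + "=false] is not allowed, only true is valid");
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(readEnableForSingleDataNode(Map.of(KEY, "true"))); // warns, prints true
        System.out.println(readEnableForSingleDataNode(Map.of()));            // no warning, prints true
        // readEnableForSingleDataNode(Map.of(KEY, "false"));                 // would throw
    }
}
```

With this commit the setting is removed from the codebase entirely, so any configuration that still references the key should drop it before upgrading.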
@@ -25,9 +25,7 @@
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.snapshots.SnapshotShardSizeInfo;

import java.util.Map;
@@ -72,25 +70,6 @@ public class DiskThresholdDecider extends AllocationDecider {

public static final String NAME = "disk_threshold";

@UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION)
public static final Setting<Boolean> ENABLE_FOR_SINGLE_DATA_NODE = Setting.boolSetting(
"cluster.routing.allocation.disk.watermark.enable_for_single_data_node",
true,
new Setting.Validator<>() {
@Override
public void validate(Boolean value) {
if (value == Boolean.FALSE) {
throw new SettingsException(
"setting [{}=false] is not allowed, only true is valid",
ENABLE_FOR_SINGLE_DATA_NODE.getKey()
);
}
}
},
Setting.Property.NodeScope,
Setting.Property.DeprecatedWarning
);

public static final Setting<Boolean> SETTING_IGNORE_DISK_WATERMARKS = Setting.boolSetting(
"index.routing.allocation.disk.watermark.ignore",
false,
@@ -102,9 +81,6 @@ public void validate(Boolean value) {

public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings) {
this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings);
// get deprecation warnings.
boolean enabledForSingleDataNode = ENABLE_FOR_SINGLE_DATA_NODE.get(settings);
assert enabledForSingleDataNode;
}

/**
@@ -51,7 +51,6 @@
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
@@ -269,7 +268,6 @@ public void apply(Settings value, Settings current, Settings previous) {
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_MAX_HEADROOM_SETTING,
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
@@ -45,8 +45,6 @@
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.Index;
@@ -1070,9 +1068,6 @@ private void doTestWatermarksEnabledForSingleDataNode(boolean testMaxHeadroom) {
ByteSizeValue.ofGb(110).toString()
);
}
if (randomBoolean()) {
builder = builder.put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), true);
}
Settings diskSettings = builder.build();

final long totalBytes = testMaxHeadroom ? ByteSizeValue.ofGb(10000).getBytes() : 100;
@@ -1151,10 +1146,6 @@ private void doTestWatermarksEnabledForSingleDataNode(boolean testMaxHeadroom) {
+ "on node, actual free: [20b], actual used: [80%]"
)
);

if (DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.exists(diskSettings)) {
assertSettingDeprecationsAndWarnings(new Setting<?>[] { DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE });
}
}

public void testWatermarksEnabledForSingleDataNodeWithPercentages() {
@@ -1165,25 +1156,6 @@ public void testWatermarksEnabledForSingleDataNodeWithMaxHeadroom() {
doTestWatermarksEnabledForSingleDataNode(true);
}

public void testSingleDataNodeDeprecationWarning() {
Settings settings = Settings.builder().put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), false).build();

IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> new DiskThresholdDecider(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS))
);

assertThat(
e.getCause().getMessage(),
equalTo(
"setting [cluster.routing.allocation.disk.watermark.enable_for_single_data_node=false] is not allowed,"
+ " only true is valid"
)
);

assertSettingDeprecationsAndWarnings(new Setting<?>[] { DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE });
}

private void doTestDiskThresholdWithSnapshotShardSizes(boolean testMaxHeadroom) {
final long shardSizeInBytes = randomBoolean()
? (testMaxHeadroom ? ByteSizeValue.ofGb(99).getBytes() : 10L) // fits free space of node1
@@ -40,8 +40,7 @@ protected Settings nodeSettings(final int nodeOrdinal, final Settings otherSetti
builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), LOW_WATERMARK_BYTES + "b")
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), HIGH_WATERMARK_BYTES + "b")
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "0b")
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms")
.put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), "true");
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "0ms");
return builder.build();
}

@@ -42,7 +42,6 @@ private DeprecationChecks() {}
NodeDeprecationChecks::checkDataPathsList,
NodeDeprecationChecks::checkSharedDataPathSetting,
NodeDeprecationChecks::checkReservedPrefixedRealmNames,
NodeDeprecationChecks::checkSingleDataNodeWatermarkSetting,
NodeDeprecationChecks::checkExporterUseIngestPipelineSettings,
NodeDeprecationChecks::checkExporterPipelineMasterTimeoutSetting,
NodeDeprecationChecks::checkExporterCreateLegacyTemplateSetting,
@@ -10,7 +10,6 @@
import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.DataTier;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@@ -216,28 +215,6 @@ static DeprecationIssue checkReservedPrefixedRealmNames(
}
}

static DeprecationIssue checkSingleDataNodeWatermarkSetting(
final Settings settings,
final PluginsAndModules pluginsAndModules,
final ClusterState clusterState,
final XPackLicenseState licenseState
) {
if (DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.exists(settings)) {
String key = DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey();
return new DeprecationIssue(
DeprecationIssue.Level.CRITICAL,
String.format(Locale.ROOT, "setting [%s] is deprecated and will not be available in a future version", key),
"https://www.elastic.co/guide/en/elasticsearch/reference/7.14/"
+ "breaking-changes-7.14.html#deprecate-single-data-node-watermark",
String.format(Locale.ROOT, "found [%s] configured. Discontinue use of this setting.", key),
false,
null
);
}

return null;
}

private static DeprecationIssue deprecatedAffixSetting(
Setting.AffixSetting<?> deprecatedAffixSetting,
String detailPattern,
@@ -211,33 +211,6 @@ public void testCheckReservedPrefixedRealmNames() {
);
}

public void testSingleDataNodeWatermarkSetting() {
Settings settings = Settings.builder().put(DiskThresholdDecider.ENABLE_FOR_SINGLE_DATA_NODE.getKey(), true).build();

List<DeprecationIssue> issues = DeprecationChecks.filterChecks(
NODE_SETTINGS_CHECKS,
c -> c.apply(settings, null, ClusterState.EMPTY_STATE, new XPackLicenseState(() -> 0))
);

final String expectedUrl = "https://www.elastic.co/guide/en/elasticsearch/reference/7.14/"
+ "breaking-changes-7.14.html#deprecate-single-data-node-watermark";
assertThat(
issues,
hasItem(
new DeprecationIssue(
DeprecationIssue.Level.CRITICAL,
"setting [cluster.routing.allocation.disk.watermark.enable_for_single_data_node] is deprecated and"
+ " will not be available in a future version",
expectedUrl,
"found [cluster.routing.allocation.disk.watermark.enable_for_single_data_node] configured."
+ " Discontinue use of this setting.",
false,
null
)
)
);
}

void monitoringSetting(String settingKey, String value) {
Settings settings = Settings.builder().put(settingKey, value).build();
List<DeprecationIssue> issues = DeprecationChecks.filterChecks(