Add deprecation info and warnings for an empty TIER_PREFERENCE (elast…
joegallo authored Oct 19, 2021
1 parent 159cb44 commit 679beda
Showing 11 changed files with 253 additions and 67 deletions.
@@ -814,16 +814,26 @@ static Settings aggregateIndexSettings(ClusterState currentState, CreateIndexClu
indexSettingsBuilder.put(requestSettings.build());

if (sourceMetadata == null) { // not for shrink/split/clone
String currentTierPreference = indexSettingsBuilder.get(DataTier.TIER_PREFERENCE);
List<String> preferredTiers = DataTier.parseTierList(currentTierPreference);
if (enforceDefaultTierPreference) {
// regardless of any previous logic, we're going to force there
// to be an appropriate non-empty value for the tier preference
String currentTierPreference = indexSettingsBuilder.get(DataTier.TIER_PREFERENCE);
if (DataTier.parseTierList(currentTierPreference).isEmpty()) {
if (preferredTiers.isEmpty()) {
String newTierPreference = isDataStreamIndex ? DataTier.DATA_HOT : DataTier.DATA_CONTENT;
logger.debug("enforcing default [{}] setting for [{}] creation, replacing [{}] with [{}]",
DataTier.TIER_PREFERENCE, request.index(), currentTierPreference, newTierPreference);
indexSettingsBuilder.put(DataTier.TIER_PREFERENCE, newTierPreference);
}
} else {
// if we're not enforcing the tier preference, then maybe warn
if (preferredTiers.isEmpty()) {
if (DataTier.dataNodesWithoutAllDataRoles(currentState).isEmpty() == false) {
DEPRECATION_LOGGER.warn(DeprecationCategory.INDICES, "index_without_tier_preference",
"[{}] creating index with an empty [{}] setting, in 8.0 this setting will be required for all indices",
request.index(), DataTier.TIER_PREFERENCE);
}
}
}
}
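The effect of the new branch: when enforceDefaultTierPreference is on, an empty tier preference is rewritten to a sensible default; when it is off, index creation still succeeds but emits a deprecation warning, and only if the cluster actually contains data nodes restricted to specific tiers. A condensed, standalone sketch of that decision follows; the class and method names are illustrative and not part of the commit.

// Illustrative restatement of the branch above; not the actual MetadataCreateIndexService code.
final class TierPreferenceSketch {

    static String resolve(String currentTierPreference, boolean isDataStreamIndex,
                          boolean enforceDefaultTierPreference, boolean clusterHasDedicatedTierNodes) {
        boolean empty = currentTierPreference == null || currentTierPreference.trim().isEmpty();
        if (enforceDefaultTierPreference) {
            // force a non-empty value: hot for data stream backing indices, content otherwise
            return empty ? (isDataStreamIndex ? "data_hot" : "data_content") : currentTierPreference;
        }
        if (empty && clusterHasDedicatedTierNodes) {
            // stands in for DEPRECATION_LOGGER.warn(...), which the commit gates on
            // DataTier.dataNodesWithoutAllDataRoles(clusterState) being non-empty
            System.err.println("creating index with an empty tier preference setting; "
                + "in 8.0 this setting will be required for all indices");
        }
        return currentTierPreference;
    }

    public static void main(String[] args) {
        System.out.println(resolve(null, false, true, false)); // -> data_content
        System.out.println(resolve("", true, true, false));    // -> data_hot
        resolve("", false, false, true);                        // warns, keeps the empty value
    }
}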

@@ -10,6 +10,7 @@

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
@@ -28,6 +29,7 @@
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**
* The {@code DataTier} class encapsulates the formalization of the "content",
@@ -266,4 +268,26 @@ public Iterator<Setting<?>> settings() {
return dependencies.iterator();
}
}

/**
* Checks each data node in the cluster state to see whether it has the explicit 'data' role or all of the
* individual data tier roles (e.g. 'data_hot', 'data_warm', etc). Having the 'data' role is treated as a
* shortcut for having every tier (see DataTierAllocationDecider#allocationAllowed(String, Set) and the
* various DataTier#isFooNode(DiscoveryNode) methods for details).
*
* @param clusterState the cluster state
* @return the set of data nodes that have neither the 'data' role nor every data tier role
*/
public static Set<DiscoveryNode> dataNodesWithoutAllDataRoles(ClusterState clusterState) {
return clusterState.getNodes().getDataNodes().values().stream()
.filter(node -> {
Set<String> roles = node.getRoles().stream()
.map(DiscoveryNodeRole::roleName)
.collect(Collectors.toSet());

return roles.contains(DiscoveryNodeRole.DATA_ROLE.roleName()) == false &&
roles.containsAll(ALL_DATA_TIERS) == false;
})
.collect(Collectors.toSet());
}
}
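The new DataTier.dataNodesWithoutAllDataRoles helper is what the index-creation path (and, further down, the deprecation checks) use to decide whether an empty tier preference is worth warning about. A small usage sketch, assuming the Elasticsearch 7.x packages shown in this diff; the wrapper class below is illustrative and not part of the commit.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.DataTier;

import java.util.Set;
import java.util.stream.Collectors;

// Illustrative caller of the new helper.
final class TierAwareClusterInspector {

    /** True if at least one data node is restricted to a subset of the data tiers. */
    static boolean hasDedicatedTierNodes(ClusterState clusterState) {
        Set<DiscoveryNode> restricted = DataTier.dataNodesWithoutAllDataRoles(clusterState);
        return restricted.isEmpty() == false;
    }

    /** Comma-separated names of the restricted nodes, handy for log or warning messages. */
    static String describeRestrictedNodes(ClusterState clusterState) {
        return DataTier.dataNodesWithoutAllDataRoles(clusterState).stream()
            .map(DiscoveryNode::getName)
            .sorted()
            .collect(Collectors.joining(", "));
    }
}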
@@ -27,6 +27,7 @@
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.DataTier;
import org.elasticsearch.cluster.routing.allocation.DataTierTests;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
@@ -996,13 +997,24 @@ private Optional<String> aggregatedTierPreference(Settings settings, boolean isD
} else {
request.dataStreamName(null);
}
Settings aggregatedIndexSettings = aggregateIndexSettings(ClusterState.EMPTY_STATE, request, templateSettings,

ClusterState clusterState = ClusterState.EMPTY_STATE;
if (randomBoolean()) {
clusterState = DataTierTests.clusterStateWithoutAllDataRoles();
}

Settings aggregatedIndexSettings = aggregateIndexSettings(clusterState, request, templateSettings,
null, Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(),
org.elasticsearch.core.Set.of(new DataTier.DefaultHotAllocationSettingProvider()), enforceDefaultTierPreference);

if (aggregatedIndexSettings.keySet().contains(DataTier.TIER_PREFERENCE)) {
return Optional.of(aggregatedIndexSettings.get(DataTier.TIER_PREFERENCE));
} else {
if (clusterState != ClusterState.EMPTY_STATE) {
assertWarnings("[" + request.index() + "] creating index with " +
"an empty [" + DataTier.TIER_PREFERENCE + "] setting, in 8.0 this setting will be required for all indices");
}

return Optional.empty();
}
}
@@ -9,6 +9,7 @@
package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -21,20 +22,27 @@

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.routing.allocation.DataTier.ALL_DATA_TIERS;
import static org.elasticsearch.cluster.routing.allocation.DataTier.DATA_COLD;
import static org.elasticsearch.cluster.routing.allocation.DataTier.DATA_HOT;
import static org.elasticsearch.cluster.routing.allocation.DataTier.DATA_WARM;
import static org.elasticsearch.cluster.routing.allocation.DataTier.getPreferredTiersConfiguration;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.both;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasProperty;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.not;

public class DataTierTests extends ESTestCase {
@@ -139,6 +147,41 @@ public void testGetPreferredTiersConfiguration() {
assertThat(exception.getMessage(), is("invalid data tier [no_tier]"));
}

public void testDataNodesWithoutAllDataRoles() {
ClusterState clusterState = clusterStateWithoutAllDataRoles();
Set<DiscoveryNode> nodes = DataTier.dataNodesWithoutAllDataRoles(clusterState);
assertThat(nodes, hasSize(2));
assertThat(nodes, hasItem(both(hasProperty("name", is("name_3")))
.and(hasProperty("roles", contains(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)))));
assertThat(nodes, hasItem(hasProperty("name", is("name_4"))));
}

public static ClusterState clusterStateWithoutAllDataRoles() {
Set<DiscoveryNodeRole> allDataRoles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES).stream()
.filter(role -> ALL_DATA_TIERS.contains(role.roleName())).collect(Collectors.toSet());

Collection<String> allButOneDataTiers = randomValueOtherThan(ALL_DATA_TIERS,
() -> randomSubsetOf(randomIntBetween(1, ALL_DATA_TIERS.size() - 1), ALL_DATA_TIERS));
Set<DiscoveryNodeRole> allButOneDataRoles = new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES).stream()
.filter(role -> allButOneDataTiers.contains(role.roleName())).collect(Collectors.toSet());

DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
List<DiscoveryNode> nodesList = org.elasticsearch.core.List.of(
newNode(0, org.elasticsearch.core.Map.of(), org.elasticsearch.core.Set.of(DiscoveryNodeRole.MASTER_ROLE)),
newNode(1, org.elasticsearch.core.Map.of(), org.elasticsearch.core.Set.of(DiscoveryNodeRole.DATA_ROLE)),
newNode(2, org.elasticsearch.core.Map.of(), allDataRoles),
newNode(3, org.elasticsearch.core.Map.of(), org.elasticsearch.core.Set.of(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)),
newNode(4, org.elasticsearch.core.Map.of(), allButOneDataRoles)
);
for (DiscoveryNode node : nodesList) {
discoBuilder = discoBuilder.add(node);
}
discoBuilder.localNodeId(randomFrom(nodesList).getId());
discoBuilder.masterNodeId(randomFrom(nodesList).getId());

return ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoBuilder.build()).build();
}

private static DiscoveryNodes buildDiscoveryNodes() {
int numNodes = randomIntBetween(3, 15);
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
@@ -132,7 +132,8 @@ public void testRandomClusterPromotesNewestReplica() throws InterruptedException
String name = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
Settings.Builder settingsBuilder = Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4))
.put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4));
.put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(2, 4))
.put(DataTier.TIER_PREFERENCE, DataTier.DATA_CONTENT);
CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
state = cluster.createIndex(state, request);
assertTrue(state.metadata().hasIndex(name));
@@ -32,6 +32,7 @@
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.DataTier;
import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
@@ -341,7 +342,8 @@ public ClusterState randomlyUpdateClusterState(ClusterState state,
}
String name = "index_" + randomAlphaOfLength(15).toLowerCase(Locale.ROOT);
Settings.Builder settingsBuilder = Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3));
.put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
.put(DataTier.TIER_PREFERENCE, DataTier.DATA_CONTENT);
if (randomBoolean()) {
int min = randomInt(2);
int max = min + randomInt(3);
@@ -13,14 +13,15 @@
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@@ -135,24 +136,25 @@ private DeprecationChecks() {
).collect(Collectors.toList());
}

static List<Function<IndexMetadata, DeprecationIssue>> INDEX_SETTINGS_CHECKS =
static List<BiFunction<ClusterState, IndexMetadata, DeprecationIssue>> INDEX_SETTINGS_CHECKS =
Collections.unmodifiableList(Arrays.asList(
IndexDeprecationChecks::oldIndicesCheck,
IndexDeprecationChecks::tooManyFieldsCheck,
IndexDeprecationChecks::chainedMultiFieldsCheck,
IndexDeprecationChecks::deprecatedDateTimeFormat,
IndexDeprecationChecks::translogRetentionSettingCheck,
IndexDeprecationChecks::fieldNamesDisabledCheck,
IndexDeprecationChecks::checkIndexDataPath,
IndexDeprecationChecks::indexingSlowLogLevelSettingCheck,
IndexDeprecationChecks::searchSlowLogLevelSettingCheck,
IndexDeprecationChecks::storeTypeSettingCheck,
IndexDeprecationChecks::checkIndexRoutingRequireSetting,
IndexDeprecationChecks::checkIndexRoutingIncludeSetting,
IndexDeprecationChecks::checkIndexRoutingExcludeSetting,
IndexDeprecationChecks::checkIndexMatrixFiltersSetting,
IndexDeprecationChecks::checkGeoShapeMappings,
IndexDeprecationChecks::frozenIndexSettingCheck
(clusterState, indexMetadata) -> IndexDeprecationChecks.oldIndicesCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.tooManyFieldsCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.chainedMultiFieldsCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.deprecatedDateTimeFormat(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.translogRetentionSettingCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.fieldNamesDisabledCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkIndexDataPath(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.indexingSlowLogLevelSettingCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.searchSlowLogLevelSettingCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.storeTypeSettingCheck(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkIndexRoutingRequireSetting(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkIndexRoutingIncludeSetting(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkIndexRoutingExcludeSetting(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkIndexMatrixFiltersSetting(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.checkGeoShapeMappings(indexMetadata),
(clusterState, indexMetadata) -> IndexDeprecationChecks.frozenIndexSettingCheck(indexMetadata),
IndexDeprecationChecks::emptyDataTierPreferenceCheck
));
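Retyping INDEX_SETTINGS_CHECKS from Function<IndexMetadata, DeprecationIssue> to BiFunction<ClusterState, IndexMetadata, DeprecationIssue> is what lets the new emptyDataTierPreferenceCheck look beyond a single index's settings. Its implementation is not shown in this diff; the sketch below is only a plausible shape under that signature, and the method name, DeprecationIssue constructor arguments, and URL are assumptions rather than the commit's actual values.

// Hypothetical ClusterState-aware index check; illustrates the BiFunction shape, not the real implementation.
static DeprecationIssue exampleEmptyTierPreferenceCheck(ClusterState clusterState, IndexMetadata indexMetadata) {
    String tierPreference = indexMetadata.getSettings().get(DataTier.TIER_PREFERENCE);
    if (DataTier.parseTierList(tierPreference).isEmpty()
            && DataTier.dataNodesWithoutAllDataRoles(clusterState).isEmpty() == false) {
        // The DeprecationIssue arguments (level, message, url, details, resolveDuringRollingUpgrade, meta)
        // are assumed from the 7.x API and are placeholders here.
        return new DeprecationIssue(DeprecationIssue.Level.WARNING,
            "index [" + indexMetadata.getIndex().getName() + "] does not set [" + DataTier.TIER_PREFERENCE + "]",
            "https://example.invalid/empty-tier-preference", // placeholder URL
            "Set " + DataTier.TIER_PREFERENCE + " explicitly; in 8.0 this setting is required for all indices.",
            false, null);
    }
    return null;
}

Registered in the list above, such a check appears simply as a method reference like IndexDeprecationChecks::emptyDataTierPreferenceCheck, since its parameters already match the BiFunction signature.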

/**
@@ -25,9 +25,9 @@
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.core.deprecation.DeprecationIssue;

import java.io.IOException;
@@ -39,6 +39,7 @@
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Collectors;

@@ -198,14 +199,16 @@ public int hashCode() {
* @param clusterSettingsChecks The list of cluster-level checks
* @return The list of deprecation issues found in the cluster
*/
public static DeprecationInfoAction.Response from(ClusterState state,
IndexNameExpressionResolver indexNameExpressionResolver,
Request request,
NodesDeprecationCheckResponse nodeDeprecationResponse,
List<Function<IndexMetadata, DeprecationIssue>> indexSettingsChecks,
List<Function<ClusterState, DeprecationIssue>> clusterSettingsChecks,
Map<String, List<DeprecationIssue>> pluginSettingIssues,
List<String> skipTheseDeprecatedSettings) {
public static DeprecationInfoAction.Response from(
ClusterState state,
IndexNameExpressionResolver indexNameExpressionResolver,
Request request,
NodesDeprecationCheckResponse nodeDeprecationResponse,
List<BiFunction<ClusterState, IndexMetadata, DeprecationIssue>> indexSettingsChecks,
List<Function<ClusterState, DeprecationIssue>> clusterSettingsChecks,
Map<String, List<DeprecationIssue>> pluginSettingIssues,
List<String> skipTheseDeprecatedSettings
) {
// Allow system index access here to prevent deprecation warnings when we call this API
String[] concreteIndexNames = indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, request);
ClusterState stateWithSkippedSettingsRemoved = removeSkippedSettings(state, concreteIndexNames, skipTheseDeprecatedSettings);
@@ -217,7 +220,7 @@ public static DeprecationInfoAction.Response from(ClusterState state,
for (String concreteIndex : concreteIndexNames) {
IndexMetadata indexMetadata = stateWithSkippedSettingsRemoved.getMetadata().index(concreteIndex);
List<DeprecationIssue> singleIndexIssues = filterChecks(indexSettingsChecks,
c -> c.apply(indexMetadata));
c -> c.apply(state, indexMetadata));
if (singleIndexIssues.size() > 0) {
indexSettingsIssues.put(concreteIndex, singleIndexIssues);
}