diff --git a/docs/changelog/115510.yaml b/docs/changelog/115510.yaml new file mode 100644 index 0000000000000..1e71270e18f97 --- /dev/null +++ b/docs/changelog/115510.yaml @@ -0,0 +1,6 @@ +pr: 115510 +summary: Fix lingering license warning header in IP filter +area: License +type: bug +issues: + - 114865 diff --git a/docs/changelog/115807.yaml b/docs/changelog/115807.yaml new file mode 100644 index 0000000000000..d17cabca4bd03 --- /dev/null +++ b/docs/changelog/115807.yaml @@ -0,0 +1,5 @@ +pr: 115807 +summary: "[Inference API] Improve chunked results error message" +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/115823.yaml b/docs/changelog/115823.yaml new file mode 100644 index 0000000000000..a6119e0fa56e4 --- /dev/null +++ b/docs/changelog/115823.yaml @@ -0,0 +1,5 @@ +pr: 115823 +summary: Add ECK Role Mapping Cleanup +area: Security +type: bug +issues: [] diff --git a/docs/changelog/115831.yaml b/docs/changelog/115831.yaml new file mode 100644 index 0000000000000..18442ec3b97e6 --- /dev/null +++ b/docs/changelog/115831.yaml @@ -0,0 +1,13 @@ +pr: 115831 +summary: Increase minimum threshold in shard balancer +area: Allocation +type: breaking +issues: [] +breaking: + title: Minimum shard balancer threshold is now 1.0 + area: Cluster and node setting + details: >- + Earlier versions of {es} accepted any non-negative value for `cluster.routing.allocation.balance.threshold`, but values smaller than + `1.0` do not make sense and have been ignored since version 8.6.1. From 9.0.0 these nonsensical values are now forbidden. + impact: Do not set `cluster.routing.allocation.balance.threshold` to a value less than `1.0`. + notable: false diff --git a/docs/changelog/115834.yaml b/docs/changelog/115834.yaml new file mode 100644 index 0000000000000..91f9e9a4e2e41 --- /dev/null +++ b/docs/changelog/115834.yaml @@ -0,0 +1,5 @@ +pr: 115834 +summary: Try to simplify geometries that fail with `TopologyException` +area: Geo +type: bug +issues: [] diff --git a/docs/changelog/115836.yaml b/docs/changelog/115836.yaml new file mode 100644 index 0000000000000..f6da638f1feff --- /dev/null +++ b/docs/changelog/115836.yaml @@ -0,0 +1,5 @@ +pr: 115836 +summary: Catch and handle disconnect exceptions in search +area: Search +type: bug +issues: [] diff --git a/muted-tests.yml b/muted-tests.yml index bbe2fa91c8de9..12004346b5097 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -224,8 +224,6 @@ tests: - class: org.elasticsearch.xpack.remotecluster.RemoteClusterSecurityWithApmTracingRestIT method: testTracingCrossCluster issue: https://github.com/elastic/elasticsearch/issues/112731 -- class: org.elasticsearch.license.LicensingTests - issue: https://github.com/elastic/elasticsearch/issues/114865 - class: org.elasticsearch.packaging.test.EnrollmentProcessTests method: test20DockerAutoFormCluster issue: https://github.com/elastic/elasticsearch/issues/114885 @@ -238,9 +236,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultE5 issue: https://github.com/elastic/elasticsearch/issues/115361 -- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests - method: testProcessFileChanges - issue: https://github.com/elastic/elasticsearch/issues/115280 - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT method: testDeploymentSurvivesRestart {cluster=UPGRADED} issue: https://github.com/elastic/elasticsearch/issues/115528 diff --git 
a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 92a704f793dc2..fcca3f9a4700c 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -16,9 +16,11 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.common.Strings; @@ -27,6 +29,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; +import org.elasticsearch.core.UpdateForV10; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -72,6 +75,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SYSTEM_INDEX_ENFORCEMENT_INDEX_VERSION; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; +import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.THRESHOLD_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; @@ -1949,4 +1953,35 @@ public static void assertNumHits(String index, int numHits, int totalShards) thr assertThat(XContentMapValues.extractValue("_shards.successful", resp), equalTo(totalShards)); assertThat(extractTotalHits(resp), equalTo(numHits)); } + + @UpdateForV10(owner = UpdateForV10.Owner.DISTRIBUTED_COORDINATION) // this test is just about v8->v9 upgrades, remove it in v10 + public void testBalancedShardsAllocatorThreshold() throws Exception { + assumeTrue("test only applies for v8->v9 upgrades", getOldClusterTestVersion().getMajor() == 8); + + final var chosenValue = randomFrom("0", "0.1", "0.5", "0.999"); + + if (isRunningAgainstOldCluster()) { + final var request = newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent").field(THRESHOLD_SETTING.getKey(), chosenValue).endObject() + ); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE)); + assertOK(client().performRequest(request)); + } + + final var clusterSettingsResponse = ObjectPath.createFromResponse( + client().performRequest(new Request("GET", "/_cluster/settings")) + ); + + final var settingsPath = "persistent." + THRESHOLD_SETTING.getKey(); + final var settingValue = clusterSettingsResponse.evaluate(settingsPath); + + if (isRunningAgainstOldCluster()) { + assertEquals(chosenValue, settingValue); + } else { + assertNull(settingValue); + assertNotNull(clusterSettingsResponse.evaluate("persistent.archived." 
+ THRESHOLD_SETTING.getKey())); + } + } } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 17618d5439d48..e0d1e7aafa637 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; +import org.elasticsearch.Build; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -81,7 +82,8 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), 1238.49922909, ByteSizeValue.ofGb(32), - ByteSizeValue.ofGb(128) + ByteSizeValue.ofGb(128), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ) ) .toList(); @@ -151,7 +153,8 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), processorsPrecision == ProcessorsPrecision.DOUBLE ? randomDoubleProcessorCount() : 0.5f, ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ) ) .toList(); @@ -164,7 +167,8 @@ private void addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(int ve Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), new DesiredNode.ProcessorsRange(minProcessors, minProcessors + randomIntBetween(10, 20)), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? null : Build.current().version() ); }).toList(); } @@ -178,7 +182,8 @@ private void addClusterNodesToDesiredNodesWithIntegerProcessors(int version) thr Settings.builder().put(NODE_NAME_SETTING.getKey(), nodeName).build(), randomIntBetween(1, 24), ByteSizeValue.ofGb(randomIntBetween(10, 24)), - ByteSizeValue.ofGb(randomIntBetween(128, 256)) + ByteSizeValue.ofGb(randomIntBetween(128, 256)), + clusterHasFeature(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED) ? 
null : Build.current().version() ) ) .toList(); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index 834d97f755dfb..4caf33feeeebb 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -23,19 +23,20 @@ import org.junit.rules.TemporaryFolder; import org.junit.rules.TestRule; -import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.function.Supplier; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; public class FileSettingsRoleMappingUpgradeIT extends ParameterizedRollingUpgradeTestCase { - private static final String settingsJSON = """ + private static final int ROLE_MAPPINGS_CLEANUP_MIGRATION_VERSION = 2; + private static final String SETTING_JSON = """ { "metadata": { "version": "1", @@ -53,7 +54,6 @@ public class FileSettingsRoleMappingUpgradeIT extends ParameterizedRollingUpgrad }"""; private static final TemporaryFolder repoDirectory = new TemporaryFolder(); - private static final ElasticsearchCluster cluster = ElasticsearchCluster.local() .distribution(DistributionType.DEFAULT) .version(getOldClusterTestVersion()) @@ -68,7 +68,7 @@ public String get() { .setting("xpack.security.enabled", "true") // workaround to avoid having to set up clients and authorization headers .setting("xpack.security.authc.anonymous.roles", "superuser") - .configFile("operator/settings.json", Resource.fromString(settingsJSON)) + .configFile("operator/settings.json", Resource.fromString(SETTING_JSON)) .build(); @ClassRule @@ -91,7 +91,30 @@ public void checkVersions() { ); } - public void testRoleMappingsAppliedOnUpgrade() throws IOException { + private static void waitForSecurityMigrationCompletionIfIndexExists() throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); + assertBusy(() -> { + Map indices = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(request))).get( + "metadata.indices" + ); + assertNotNull(indices); + // If the security index exists, migration needs to happen. There is a bug in the pre-cluster-state role mapping code that tries + // to write file-based role mappings before the security index manager state is recovered, which makes it look like the security + // index is outdated (isIndexUpToDate == false). Because we can't rely on the index being there for old versions, this check + // is needed. + if (indices.containsKey(".security-7")) { + // JsonMapView doesn't support . prefixed indices (splits on .)
+ @SuppressWarnings("unchecked") + String responseVersion = new XContentTestUtils.JsonMapView((Map) indices.get(".security-7")).get( + "migration_version.version" + ); + assertNotNull(responseVersion); + assertTrue(Integer.parseInt(responseVersion) >= ROLE_MAPPINGS_CLEANUP_MIGRATION_VERSION); + } + }); + } + + public void testRoleMappingsAppliedOnUpgrade() throws Exception { if (isOldCluster()) { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); List roleMappings = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( @@ -107,11 +130,10 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { ).get("metadata.role_mappings.role_mappings"); assertThat(clusterStateRoleMappings, is(not(nullValue()))); assertThat(clusterStateRoleMappings.size(), equalTo(1)); - + waitForSecurityMigrationCompletionIfIndexExists(); assertThat( entityAsMap(client().performRequest(new Request("GET", "/_security/role_mapping"))).keySet(), - // TODO change this to `contains` once the clean-up migration work is merged - hasItem("everyone_kibana-read-only-operator-mapping") + contains("everyone_kibana-read-only-operator-mapping") ); } } diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 1a398f79085e7..6cc2028bffa39 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -61,6 +61,4 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") task.skipTest("cat.aliases/10_basic/Deprecated local parameter", "CAT APIs not covered by compatibility policy") - task.skipTest("cluster.desired_nodes/10_basic/Test delete desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") - task.skipTest("cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning", "node_version warning is removed in 9.0") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml index a45146a4e147a..1d1aa524ffb21 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cluster.desired_nodes/10_basic.yml @@ -59,6 +59,61 @@ teardown: - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb" } } - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb" } } --- +"Test update desired nodes with node_version generates a warning": + - skip: + reason: "contains is a newly added assertion" + features: ["contains", "allowed_warnings"] + - do: + cluster.state: {} + + # Get master node id + - set: { master_node: master } + + - do: + nodes.info: {} + - set: { nodes.$master.version: es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 1 + body: + nodes: + - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." 
+ - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + - match: + $body: + history_id: "test" + version: 1 + nodes: + - { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 2 + body: + nodes: + - { settings: { "node.name": "instance-000187" }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } + - { settings: { "node.name": "instance-000188" }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." + - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + + - match: { history_id: "test" } + - match: { version: 2 } + - length: { nodes: 2 } + - contains: { nodes: { settings: { node: { name: "instance-000187" } }, processors: 8.5, memory: "64gb", storage: "128gb", node_version: $es_version } } + - contains: { nodes: { settings: { node: { name: "instance-000188" } }, processors: 16.0, memory: "128gb", storage: "1tb", node_version: $es_version } } +--- "Test update move to a new history id": - skip: reason: "contains is a newly added assertion" @@ -144,6 +199,46 @@ teardown: _internal.get_desired_nodes: {} - match: { status: 404 } --- +"Test delete desired nodes with node_version generates a warning": + - skip: + features: allowed_warnings + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.info: {} + - set: { nodes.$master.version: es_version } + + - do: + _internal.update_desired_nodes: + history_id: "test" + version: 1 + body: + nodes: + - { settings: { "node.external_id": "instance-000187" }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } + allowed_warnings: + - "[version removal] Specifying node_version in desired nodes requests is deprecated." 
+ - match: { replaced_existing_history_id: false } + + - do: + _internal.get_desired_nodes: {} + - match: + $body: + history_id: "test" + version: 1 + nodes: + - { settings: { node: { external_id: "instance-000187" } }, processors: 8.0, memory: "64gb", storage: "128gb", node_version: $es_version } + + - do: + _internal.delete_desired_nodes: {} + + - do: + catch: missing + _internal.get_desired_nodes: {} + - match: { status: 404 } +--- "Test update desired nodes is idempotent": - skip: reason: "contains is a newly added assertion" diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index e0e240be0377a..93c8d66447e34 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -84,15 +84,20 @@ public void run() { for (final DfsSearchResult dfsResult : searchResults) { final SearchShardTarget shardTarget = dfsResult.getSearchShardTarget(); - Transport.Connection connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); - ShardSearchRequest shardRequest = rewriteShardSearchRequest(dfsResult.getShardSearchRequest()); + final int shardIndex = dfsResult.getShardIndex(); QuerySearchRequest querySearchRequest = new QuerySearchRequest( - context.getOriginalIndices(dfsResult.getShardIndex()), + context.getOriginalIndices(shardIndex), dfsResult.getContextId(), - shardRequest, + rewriteShardSearchRequest(dfsResult.getShardSearchRequest()), dfs ); - final int shardIndex = dfsResult.getShardIndex(); + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + shardFailure(e, querySearchRequest, shardIndex, shardTarget, counter); + return; + } searchTransportService.sendExecuteQuery( connection, querySearchRequest, @@ -112,10 +117,7 @@ protected void innerOnResponse(QuerySearchResult response) { @Override public void onFailure(Exception exception) { try { - context.getLogger() - .debug(() -> "[" + querySearchRequest.contextId() + "] Failed to execute query phase", exception); - progressListener.notifyQueryFailure(shardIndex, shardTarget, exception); - counter.onFailure(shardIndex, shardTarget, exception); + shardFailure(exception, querySearchRequest, shardIndex, shardTarget, counter); } finally { if (context.isPartOfPointInTime(querySearchRequest.contextId()) == false) { // the query might not have been executed at all (for example because thread pool rejected @@ -134,6 +136,18 @@ public void onFailure(Exception exception) { } } + private void shardFailure( + Exception exception, + QuerySearchRequest querySearchRequest, + int shardIndex, + SearchShardTarget shardTarget, + CountedCollector counter + ) { + context.getLogger().debug(() -> "[" + querySearchRequest.contextId() + "] Failed to execute query phase", exception); + progressListener.notifyQueryFailure(shardIndex, shardTarget, exception); + counter.onFailure(shardIndex, shardTarget, exception); + } + // package private for testing ShardSearchRequest rewriteShardSearchRequest(ShardSearchRequest request) { SearchSourceBuilder source = request.source(); diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index 99b24bd483fb4..29aba0eee1f55 100644 --- 
a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.rank.RankDoc; import org.elasticsearch.search.rank.RankDocShardInfo; +import org.elasticsearch.transport.Transport; import java.util.ArrayList; import java.util.HashMap; @@ -214,9 +215,41 @@ private void executeFetch( final ShardSearchContextId contextId = shardPhaseResult.queryResult() != null ? shardPhaseResult.queryResult().getContextId() : shardPhaseResult.rankFeatureResult().getContextId(); + var listener = new SearchActionListener(shardTarget, shardIndex) { + @Override + public void innerOnResponse(FetchSearchResult result) { + try { + progressListener.notifyFetchResult(shardIndex); + counter.onResult(result); + } catch (Exception e) { + context.onPhaseFailure(FetchSearchPhase.this, "", e); + } + } + + @Override + public void onFailure(Exception e) { + try { + logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); + progressListener.notifyFetchFailure(shardIndex, shardTarget, e); + counter.onFailure(shardIndex, shardTarget, e); + } finally { + // the search context might not be cleared on the node where the fetch was executed for example + // because the action was rejected by the thread pool. in this case we need to send a dedicated + // request to clear the search context. + releaseIrrelevantSearchContext(shardPhaseResult, context); + } + } + }; + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } context.getSearchTransport() .sendExecuteFetch( - context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + connection, new ShardFetchSearchRequest( context.getOriginalIndices(shardPhaseResult.getShardIndex()), contextId, @@ -228,31 +261,7 @@ private void executeFetch( aggregatedDfs ), context.getTask(), - new SearchActionListener<>(shardTarget, shardIndex) { - @Override - public void innerOnResponse(FetchSearchResult result) { - try { - progressListener.notifyFetchResult(shardIndex); - counter.onResult(result); - } catch (Exception e) { - context.onPhaseFailure(FetchSearchPhase.this, "", e); - } - } - - @Override - public void onFailure(Exception e) { - try { - logger.debug(() -> "[" + contextId + "] Failed to execute fetch phase", e); - progressListener.notifyFetchFailure(shardIndex, shardTarget, e); - counter.onFailure(shardIndex, shardTarget, e); - } finally { - // the search context might not be cleared on the node where the fetch was executed for example - // because the action was rejected by the thread pool. in this case we need to send a dedicated - // request to clear the search context. 
- releaseIrrelevantSearchContext(shardPhaseResult, context); - } - } - } + listener ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java index dd3c28bba0fce..e37d6d1729f9f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/RankFeaturePhase.java @@ -24,6 +24,7 @@ import org.elasticsearch.search.rank.feature.RankFeatureDoc; import org.elasticsearch.search.rank.feature.RankFeatureResult; import org.elasticsearch.search.rank.feature.RankFeatureShardRequest; +import org.elasticsearch.transport.Transport; import java.util.List; @@ -131,9 +132,38 @@ private void executeRankFeatureShardPhase( final SearchShardTarget shardTarget = queryResult.queryResult().getSearchShardTarget(); final ShardSearchContextId contextId = queryResult.queryResult().getContextId(); final int shardIndex = queryResult.getShardIndex(); + var listener = new SearchActionListener(shardTarget, shardIndex) { + @Override + protected void innerOnResponse(RankFeatureResult response) { + try { + progressListener.notifyRankFeatureResult(shardIndex); + rankRequestCounter.onResult(response); + } catch (Exception e) { + context.onPhaseFailure(RankFeaturePhase.this, "", e); + } + } + + @Override + public void onFailure(Exception e) { + try { + logger.debug(() -> "[" + contextId + "] Failed to execute rank phase", e); + progressListener.notifyRankFeatureFailure(shardIndex, shardTarget, e); + rankRequestCounter.onFailure(shardIndex, shardTarget, e); + } finally { + releaseIrrelevantSearchContext(queryResult, context); + } + } + }; + final Transport.Connection connection; + try { + connection = context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } context.getSearchTransport() .sendExecuteRankFeature( - context.getConnection(shardTarget.getClusterAlias(), shardTarget.getNodeId()), + connection, new RankFeatureShardRequest( context.getOriginalIndices(queryResult.getShardIndex()), queryResult.getContextId(), @@ -141,28 +171,7 @@ private void executeRankFeatureShardPhase( entry ), context.getTask(), - new SearchActionListener<>(shardTarget, shardIndex) { - @Override - protected void innerOnResponse(RankFeatureResult response) { - try { - progressListener.notifyRankFeatureResult(shardIndex); - rankRequestCounter.onResult(response); - } catch (Exception e) { - context.onPhaseFailure(RankFeaturePhase.this, "", e); - } - } - - @Override - public void onFailure(Exception e) { - try { - logger.debug(() -> "[" + contextId + "] Failed to execute rank phase", e); - progressListener.notifyRankFeatureFailure(shardIndex, shardTarget, e); - rankRequestCounter.onFailure(shardIndex, shardTarget, e); - } finally { - releaseIrrelevantSearchContext(queryResult, context); - } - } - } + listener ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 5b7ee04d020fc..26eb266cd457e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -87,12 +87,14 @@ protected void executePhaseOnShard( final SearchShardTarget shard, final SearchActionListener listener ) { - 
getSearchTransport().sendExecuteDfs( - getConnection(shard.getClusterAlias(), shard.getNodeId()), - buildShardSearchRequest(shardIt, listener.requestIndex), - getTask(), - listener - ); + final Transport.Connection connection; + try { + connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } + getSearchTransport().sendExecuteDfs(connection, buildShardSearchRequest(shardIt, listener.requestIndex), getTask(), listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index e0ad4691fa991..33b2cdf74cd79 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -94,8 +94,15 @@ protected void executePhaseOnShard( final SearchShardTarget shard, final SearchActionListener listener ) { + final Transport.Connection connection; + try { + connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + } catch (Exception e) { + listener.onFailure(e); + return; + } ShardSearchRequest request = rewriteShardSearchRequest(super.buildShardSearchRequest(shardIt, listener.requestIndex)); - getSearchTransport().sendExecuteQuery(getConnection(shard.getClusterAlias(), shard.getNodeId()), request, getTask(), listener); + getSearchTransport().sendExecuteQuery(connection, request, getTask(), listener); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index fe72a59565cf6..fb8559b19d81d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -14,6 +14,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -21,6 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.Processors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; @@ -36,6 +38,7 @@ import java.util.Set; import java.util.TreeSet; import java.util.function.Predicate; +import java.util.regex.Pattern; import static java.lang.String.format; import static org.elasticsearch.node.Node.NODE_EXTERNAL_ID_SETTING; @@ -55,6 +58,8 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl private static final ParseField PROCESSORS_RANGE_FIELD = new ParseField("processors_range"); private static final ParseField MEMORY_FIELD = new ParseField("memory"); private static final ParseField STORAGE_FIELD = new ParseField("storage"); + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field + private static final ParseField VERSION_FIELD = new ParseField("node_version"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "desired_node", @@ 
-64,7 +69,8 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparabl (Processors) args[1], (ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4] + (ByteSizeValue) args[4], + (String) args[5] ) ); @@ -98,6 +104,12 @@ static void configureParser(ConstructingObjectParser parser) { STORAGE_FIELD, ObjectParser.ValueType.STRING ); + parser.declareField( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> p.text(), + VERSION_FIELD, + ObjectParser.ValueType.STRING + ); } private final Settings settings; @@ -106,9 +118,21 @@ static void configureParser(ConstructingObjectParser parser) { private final ByteSizeValue memory; private final ByteSizeValue storage; + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated version field + private final String version; private final String externalId; private final Set roles; + @Deprecated + public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage, String version) { + this(settings, null, processorsRange, memory, storage, version); + } + + @Deprecated + public DesiredNode(Settings settings, double processors, ByteSizeValue memory, ByteSizeValue storage, String version) { + this(settings, Processors.of(processors), null, memory, storage, version); + } + public DesiredNode(Settings settings, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { this(settings, null, processorsRange, memory, storage); } @@ -118,6 +142,17 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B } DesiredNode(Settings settings, Processors processors, ProcessorsRange processorsRange, ByteSizeValue memory, ByteSizeValue storage) { + this(settings, processors, processorsRange, memory, storage, null); + } + + DesiredNode( + Settings settings, + Processors processors, + ProcessorsRange processorsRange, + ByteSizeValue memory, + ByteSizeValue storage, + @Deprecated String version + ) { assert settings != null; assert memory != null; assert storage != null; @@ -151,6 +186,7 @@ public DesiredNode(Settings settings, double processors, ByteSizeValue memory, B this.processorsRange = processorsRange; this.memory = memory; this.storage = storage; + this.version = version; this.externalId = NODE_EXTERNAL_ID_SETTING.get(settings); this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(DiscoveryNode.getRolesFromSettings(settings))); } @@ -174,7 +210,19 @@ public static DesiredNode readFrom(StreamInput in) throws IOException { } else { version = Version.readVersion(in).toString(); } - return new DesiredNode(settings, processors, processorsRange, memory, storage); + return new DesiredNode(settings, processors, processorsRange, memory, storage, version); + } + + private static final Pattern SEMANTIC_VERSION_PATTERN = Pattern.compile("^(\\d+\\.\\d+\\.\\d+)\\D?.*"); + + private static Version parseLegacyVersion(String version) { + if (version != null) { + var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); + if (semanticVersionMatcher.matches()) { + return Version.fromString(semanticVersionMatcher.group(1)); + } + } + return null; } @Override @@ -191,9 +239,15 @@ public void writeTo(StreamOutput out) throws IOException { memory.writeTo(out); storage.writeTo(out); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_13_0)) { - out.writeOptionalString(null); + out.writeOptionalString(version); } else { - Version.writeVersion(Version.CURRENT, out); + Version 
parsedVersion = parseLegacyVersion(version); + if (version == null) { + // Some node is from before we made the version field not required. If so, fill in with the current node version. + Version.writeVersion(Version.CURRENT, out); + } else { + Version.writeVersion(parsedVersion, out); + } } } @@ -221,6 +275,14 @@ public void toInnerXContent(XContentBuilder builder, Params params) throws IOExc } builder.field(MEMORY_FIELD.getPreferredName(), memory); builder.field(STORAGE_FIELD.getPreferredName(), storage); + addDeprecatedVersionField(builder); + } + + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) // Remove deprecated field from response + private void addDeprecatedVersionField(XContentBuilder builder) throws IOException { + if (version != null) { + builder.field(VERSION_FIELD.getPreferredName(), version); + } } public boolean hasMasterRole() { @@ -304,6 +366,7 @@ private boolean equalsWithoutProcessorsSpecification(DesiredNode that) { return Objects.equals(settings, that.settings) && Objects.equals(memory, that.memory) && Objects.equals(storage, that.storage) + && Objects.equals(version, that.version) && Objects.equals(externalId, that.externalId) && Objects.equals(roles, that.roles); } @@ -316,7 +379,7 @@ public boolean equalsWithProcessorsCloseTo(DesiredNode that) { @Override public int hashCode() { - return Objects.hash(settings, processors, processorsRange, memory, storage, externalId, roles); + return Objects.hash(settings, processors, processorsRange, memory, storage, version, externalId, roles); } @Override @@ -345,6 +408,10 @@ public String toString() { + '}'; } + public boolean hasVersion() { + return Strings.isNullOrBlank(version) == false; + } + public record ProcessorsRange(Processors min, @Nullable Processors max) implements Writeable, ToXContentObject { private static final ParseField MIN_FIELD = new ParseField("min"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 606309adf205c..7b89406be9aa0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -44,12 +44,13 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) (Processors) args[1], (DesiredNode.ProcessorsRange) args[2], (ByteSizeValue) args[3], - (ByteSizeValue) args[4] + (ByteSizeValue) args[4], + (String) args[5] ), // An unknown status is expected during upgrades to versions >= STATUS_TRACKING_SUPPORT_VERSION // the desired node status would be populated when a node in the newer version is elected as // master, the desired nodes status update happens in NodeJoinExecutor. - args[5] == null ? Status.PENDING : (Status) args[5] + args[6] == null ? 
Status.PENDING : (Status) args[6] ) ); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java index 2390c96664057..a0b35f7cfc3eb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateMetadata.java @@ -91,6 +91,21 @@ public Set conflicts(String handlerName, Set modified) { return Collections.unmodifiableSet(intersect); } + /** + * Get the reserved keys for the handler name + * + * @param handlerName handler name to get keys for + * @return set of keys for that handler + */ + public Set keys(String handlerName) { + ReservedStateHandlerMetadata handlerMetadata = handlers.get(handlerName); + if (handlerMetadata == null || handlerMetadata.keys().isEmpty()) { + return Collections.emptySet(); + } + + return Collections.unmodifiableSet(handlerMetadata.keys()); + } + /** * Reads an {@link ReservedStateMetadata} from a {@link StreamInput} * diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 840aa3a3c1d3f..108bb83d90871 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -32,8 +32,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; -import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -41,7 +39,6 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.gateway.PriorityComparator; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.injection.guice.Inject; @@ -109,7 +106,7 @@ public class BalancedShardsAllocator implements ShardsAllocator { public static final Setting THRESHOLD_SETTING = Setting.floatSetting( "cluster.routing.allocation.balance.threshold", 1.0f, - 0.0f, + 1.0f, Property.Dynamic, Property.NodeScope ); @@ -140,34 +137,10 @@ public BalancedShardsAllocator(ClusterSettings clusterSettings, WriteLoadForecas clusterSettings.initializeAndWatch(INDEX_BALANCE_FACTOR_SETTING, value -> this.indexBalanceFactor = value); clusterSettings.initializeAndWatch(WRITE_LOAD_BALANCE_FACTOR_SETTING, value -> this.writeLoadBalanceFactor = value); clusterSettings.initializeAndWatch(DISK_USAGE_BALANCE_FACTOR_SETTING, value -> this.diskUsageBalanceFactor = value); - clusterSettings.initializeAndWatch(THRESHOLD_SETTING, value -> this.threshold = ensureValidThreshold(value)); + clusterSettings.initializeAndWatch(THRESHOLD_SETTING, value -> this.threshold = value); this.writeLoadForecaster = writeLoadForecaster; } - /** - * Clamp threshold to be at least 1, and log a critical deprecation warning if smaller values are given. 
- * - * Once {@link org.elasticsearch.Version#V_7_17_0} goes out of scope, start to properly reject such bad values. - */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private static float ensureValidThreshold(float threshold) { - if (1.0f <= threshold) { - return threshold; - } else { - DeprecationLogger.getLogger(BalancedShardsAllocator.class) - .critical( - DeprecationCategory.SETTINGS, - "balance_threshold_too_small", - "ignoring value [{}] for [{}] since it is smaller than 1.0; " - + "setting [{}] to a value smaller than 1.0 will be forbidden in a future release", - threshold, - THRESHOLD_SETTING.getKey(), - THRESHOLD_SETTING.getKey() - ); - return 1.0f; - } - } - @Override public void allocate(RoutingAllocation allocation) { assert allocation.ignoreDisable() == false; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index efb1facc79b3a..2919f98ee200e 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -128,7 +128,7 @@ private static Version parseUnchecked(String version) { public static final IndexVersion MERGE_ON_RECOVERY_VERSION = def(8_515_00_0, Version.LUCENE_9_11_1); public static final IndexVersion UPGRADE_TO_LUCENE_9_12 = def(8_516_00_0, Version.LUCENE_9_12_0); public static final IndexVersion ENABLE_IGNORE_ABOVE_LOGSDB = def(8_517_00_0, Version.LUCENE_9_12_0); - + public static final IndexVersion ADD_ROLE_MAPPING_CLEANUP_MIGRATION = def(8_518_00_0, Version.LUCENE_9_12_0); public static final IndexVersion UPGRADE_TO_LUCENE_10_0_0 = def(9_000_00_0, Version.LUCENE_10_0_0); /* diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java index b8e1fa0c836a3..ec8bb6285bdd4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestUpdateDesiredNodesAction.java @@ -12,11 +12,13 @@ import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesAction; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.DesiredNode; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -65,6 +67,16 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli ); } + if (clusterSupportsFeature.test(DesiredNode.DESIRED_NODE_VERSION_DEPRECATED)) { + if (updateDesiredNodesRequest.getNodes().stream().anyMatch(DesiredNode::hasVersion)) { + deprecationLogger.compatibleCritical("desired_nodes_version", VERSION_DEPRECATION_MESSAGE); + } + } else { + if (updateDesiredNodesRequest.getNodes().stream().anyMatch(n -> n.hasVersion() == false)) { + throw new XContentParseException("[node_version] field is required and must have a valid value"); + } + } + return restChannel -> client.execute( UpdateDesiredNodesAction.INSTANCE, 
updateDesiredNodesRequest, diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java index 41207a2d968b8..9a769567bee1c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/BalancedSingleShardTests.java @@ -246,7 +246,7 @@ public void testNodeDecisionsRanking() { // return the same ranking as the current node ClusterState clusterState = ClusterStateCreationUtils.state(randomIntBetween(1, 10), new String[] { "idx" }, 1); ShardRouting shardToRebalance = clusterState.routingTable().index("idx").shardsWithState(ShardRoutingState.STARTED).get(0); - MoveDecision decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), -1); + MoveDecision decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); int currentRanking = decision.getCurrentNodeRanking(); assertEquals(1, currentRanking); for (NodeAllocationResult result : decision.getNodeDecisions()) { @@ -258,7 +258,7 @@ public void testNodeDecisionsRanking() { clusterState = ClusterStateCreationUtils.state(1, new String[] { "idx" }, randomIntBetween(2, 10)); shardToRebalance = clusterState.routingTable().index("idx").shardsWithState(ShardRoutingState.STARTED).get(0); clusterState = addNodesToClusterState(clusterState, randomIntBetween(1, 10)); - decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), 0.01f); + decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); for (NodeAllocationResult result : decision.getNodeDecisions()) { assertThat(result.getWeightRanking(), lessThan(decision.getCurrentNodeRanking())); } @@ -285,7 +285,7 @@ public void testNodeDecisionsRanking() { } } clusterState = addNodesToClusterState(clusterState, 1); - decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet(), 0.01f); + decision = executeRebalanceFor(shardToRebalance, clusterState, emptySet()); for (NodeAllocationResult result : decision.getNodeDecisions()) { if (result.getWeightRanking() < decision.getCurrentNodeRanking()) { // highest ranked node should not be any of the initial nodes @@ -298,22 +298,13 @@ public void testNodeDecisionsRanking() { assertTrue(nodesWithTwoShards.contains(result.getNode().getId())); } } - - assertCriticalWarnings(""" - ignoring value [0.01] for [cluster.routing.allocation.balance.threshold] since it is smaller than 1.0; setting \ - [cluster.routing.allocation.balance.threshold] to a value smaller than 1.0 will be forbidden in a future release"""); } private MoveDecision executeRebalanceFor( final ShardRouting shardRouting, final ClusterState clusterState, - final Set noDecisionNodes, - final float threshold + final Set noDecisionNodes ) { - Settings settings = Settings.EMPTY; - if (Float.compare(-1.0f, threshold) != 0) { - settings = Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), threshold).build(); - } AllocationDecider allocationDecider = new AllocationDecider() { @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { @@ -329,7 +320,7 @@ public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation alloca return Decision.YES; } }; - BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings); + BalancedShardsAllocator allocator = new 
BalancedShardsAllocator(Settings.EMPTY); RoutingAllocation routingAllocation = newRoutingAllocation( new AllocationDeciders(Arrays.asList(allocationDecider, rebalanceDecider)), clusterState diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index 8392b6fe3e148..98c3451329f52 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -441,15 +441,10 @@ public void testGetIndexDiskUsageInBytes() { public void testThresholdLimit() { final var badValue = (float) randomDoubleBetween(0.0, Math.nextDown(1.0f), true); - assertEquals( - 1.0f, - new BalancedShardsAllocator(Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), badValue).build()) - .getThreshold(), - 0.0f + expectThrows( + IllegalArgumentException.class, + () -> new BalancedShardsAllocator(Settings.builder().put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), badValue).build()) ); - assertCriticalWarnings("ignoring value [" + badValue + """ - ] for [cluster.routing.allocation.balance.threshold] since it is smaller than 1.0; setting \ - [cluster.routing.allocation.balance.threshold] to a value smaller than 1.0 will be forbidden in a future release"""); final var goodValue = (float) randomDoubleBetween(1.0, 10.0, true); assertEquals( diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java index ef386afdbabbc..971bcb7f0a2e6 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/ByteBufferStreamInputTests.java @@ -21,4 +21,52 @@ protected StreamInput getStreamInput(BytesReference bytesReference) throws IOExc final BytesRef bytesRef = bytesReference.toBytesRef(); return new ByteBufferStreamInput(ByteBuffer.wrap(bytesRef.bytes, bytesRef.offset, bytesRef.length)); } + + public void testReadVLongNegative() throws IOException { + for (int i = 0; i < 1024; i++) { + long write = randomNegativeLong(); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + long read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + } + } + + public void testReadVLongBounds() throws IOException { + long write = Long.MAX_VALUE; + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + long read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + + write = Long.MIN_VALUE; + out = new BytesStreamOutput(); + out.writeVLongNoCheck(write); + read = getStreamInput(out.bytes()).readVLong(); + assertEquals(write, read); + } + + public void testReadVIntNegative() throws IOException { + for (int i = 0; i < 1024; i++) { + int write = randomNegativeInt(); + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVInt(write); + int read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, read); + } + } + + public void testReadVIntBounds() throws IOException { + int write = Integer.MAX_VALUE; + BytesStreamOutput out = new BytesStreamOutput(); + out.writeVInt(write); + long read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, 
read); + + write = Integer.MIN_VALUE; + out = new BytesStreamOutput(); + out.writeVInt(write); + read = getStreamInput(out.bytes()).readVInt(); + assertEquals(write, read); + } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 8af36e2f9677e..f67d7ddcc7550 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; @@ -39,9 +41,10 @@ import org.mockito.stubbing.Answer; import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.StandardCopyOption; import java.util.List; import java.util.Map; import java.util.Set; @@ -50,6 +53,8 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.hasEntry; @@ -190,9 +195,7 @@ public void testInitialFileWorks() throws Exception { return null; }).when(controller).process(any(), any(XContentParser.class), any(), any()); - CountDownLatch latch = new CountDownLatch(1); - - fileSettingsService.addFileChangedListener(latch::countDown); + CountDownLatch fileProcessingLatch = new CountDownLatch(1); Files.createDirectories(fileSettingsService.watchedFileDir()); // contents of the JSON don't matter, we just need a file to exist @@ -202,15 +205,14 @@ public void testInitialFileWorks() throws Exception { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + fileProcessingLatch.countDown(); } }).when(fileSettingsService).processFileOnServiceStart(); fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - // wait for listener to be called - assertTrue(latch.await(20, TimeUnit.SECONDS)); + longAwait(fileProcessingLatch); verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); @@ -223,40 +225,40 @@ public void testProcessFileChanges() throws Exception { return null; }).when(controller).process(any(), any(XContentParser.class), any(), any()); - // we get three events: initial clusterChanged event, first write, second write - CountDownLatch latch = new CountDownLatch(3); - - fileSettingsService.addFileChangedListener(latch::countDown); - - Files.createDirectories(fileSettingsService.watchedFileDir()); - // contents of the JSON don't matter, we just need a file to exist - writeTestFile(fileSettingsService.watchedFile(), "{}"); - + CountDownLatch changesOnStartLatch = new CountDownLatch(1); doAnswer((Answer) 
invocation -> { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + changesOnStartLatch.countDown(); } }).when(fileSettingsService).processFileOnServiceStart(); + + CountDownLatch changesLatch = new CountDownLatch(1); doAnswer((Answer) invocation -> { try { return invocation.callRealMethod(); } finally { - latch.countDown(); + changesLatch.countDown(); } }).when(fileSettingsService).processFileChanges(); + Files.createDirectories(fileSettingsService.watchedFileDir()); + // contents of the JSON don't matter, we just need a file to exist + writeTestFile(fileSettingsService.watchedFile(), "{}"); + fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); - // second file change; contents still don't matter - overwriteTestFile(fileSettingsService.watchedFile(), "{}"); - // wait for listener to be called (once for initial processing, once for subsequent update) - assertTrue(latch.await(20, TimeUnit.SECONDS)); + longAwait(changesOnStartLatch); verify(fileSettingsService, times(1)).processFileOnServiceStart(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION), any()); + + // second file change; contents still don't matter + writeTestFile(fileSettingsService.watchedFile(), "[]"); + longAwait(changesLatch); + verify(fileSettingsService, times(1)).processFileChanges(); verify(controller, times(1)).process(any(), any(XContentParser.class), eq(ReservedStateVersionCheck.HIGHER_VERSION_ONLY), any()); } @@ -295,9 +297,7 @@ public void testStopWorksInMiddleOfProcessing() throws Exception { // Make some fake settings file to cause the file settings service to process it writeTestFile(fileSettingsService.watchedFile(), "{}"); - // we need to wait a bit, on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, - // on Linux is instantaneous. Windows is instantaneous too. 
- assertTrue(processFileLatch.await(30, TimeUnit.SECONDS)); + longAwait(processFileLatch); // Stopping the service should interrupt the watcher thread, we should be able to stop fileSettingsService.stop(); @@ -352,15 +352,34 @@ public void testHandleSnapshotRestoreResetsMetadata() throws Exception { } // helpers - private void writeTestFile(Path path, String contents) throws IOException { - Path tempFilePath = createTempFile(); - Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + private static void writeTestFile(Path path, String contents) { + Path tempFile = null; + try { + tempFile = Files.createTempFile(path.getParent(), path.getFileName().toString(), "tmp"); + Files.writeString(tempFile, contents); + + try { + Files.move(tempFile, path, REPLACE_EXISTING, ATOMIC_MOVE); + } catch (AtomicMoveNotSupportedException e) { + Files.move(tempFile, path, REPLACE_EXISTING); + } + } catch (final IOException e) { + throw new UncheckedIOException(Strings.format("could not write file [%s]", path.toAbsolutePath()), e); + } finally { + // we are ignoring exceptions here, so we do not need handle whether or not tempFile was initialized nor if the file exists + IOUtils.deleteFilesIgnoringExceptions(tempFile); + } } - private void overwriteTestFile(Path path, String contents) throws IOException { - Path tempFilePath = createTempFile(); - Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.REPLACE_EXISTING); + // this waits for up to 20 seconds to account for watcher service differences between OSes + // on MacOS it may take up to 10 seconds for the Java watcher service to notice the file, + // on Linux is instantaneous. Windows is instantaneous too. + private static void longAwait(CountDownLatch latch) { + try { + assertTrue("longAwait: CountDownLatch did not reach zero within the timeout", latch.await(20, TimeUnit.SECONDS)); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e, "longAwait: interrupted waiting for CountDownLatch to reach zero"); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 7a72a7bd0daf0..8bc81fef2157d 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -365,9 +366,15 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format. */ + @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) + // This used to pick an index version from 7.0.0 to 8.9.0. 
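As a side note on the rewritten writeTestFile helper above: it stages the content in a temporary file in the target directory, prefers an atomic rename, and falls back to a plain replace where the filesystem cannot do atomic moves. A stripped-down sketch of that idiom using only java.nio follows; the method name and error handling are illustrative rather than copied from the test.

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;

import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;

final class AtomicWriteSketch {

    // Write contents to path so readers never observe a half-written file.
    static void writeAtomically(Path path, String contents) {
        Path tempFile = null;
        try {
            // Create the temp file next to the target so the rename stays on one filesystem.
            tempFile = Files.createTempFile(path.getParent(), path.getFileName().toString(), ".tmp");
            Files.writeString(tempFile, contents);
            try {
                Files.move(tempFile, path, REPLACE_EXISTING, ATOMIC_MOVE);
            } catch (AtomicMoveNotSupportedException e) {
                // Some filesystems cannot do atomic moves; fall back to a plain replace.
                Files.move(tempFile, path, REPLACE_EXISTING);
            }
        } catch (IOException e) {
            throw new UncheckedIOException("could not write file [" + path.toAbsolutePath() + "]", e);
        } finally {
            if (tempFile != null) {
                try {
                    Files.deleteIfExists(tempFile);
                } catch (IOException ignored) {
                    // best-effort cleanup of the staging file
                }
            }
        }
    }
}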
The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { - initWithSnapshotVersion(repoName, repoPath, IndexVersionUtils.randomVersion()); + initWithSnapshotVersion( + repoName, + repoPath, + IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) + ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 5d7012db80a6e..5bfcd54e963b3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1032,6 +1032,13 @@ public static long randomNonNegativeLong() { return randomLong() & Long.MAX_VALUE; } + /** + * @return a long between Long.MIN_VALUE and -1 (inclusive) chosen uniformly at random. + */ + public static long randomNegativeLong() { + return randomLong() | Long.MIN_VALUE; + } + /** * @return an int between 0 and Integer.MAX_VALUE (inclusive) chosen uniformly at random. */ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java index 4fe2c9ae486f1..eb68af7589717 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/results/ResultUtils.java @@ -14,8 +14,9 @@ public class ResultUtils { public static ElasticsearchStatusException createInvalidChunkedResultException(String expectedResultName, String receivedResultName) { return new ElasticsearchStatusException( - "Expected a chunked inference [{}] received [{}]", - RestStatus.INTERNAL_SERVER_ERROR, + "Received incompatible results. Check that your model_id matches the task_type of this endpoint. 
" + + "Expected chunked output of type [{}] but received [{}].", + RestStatus.CONFLICT, expectedResultName, receivedResultName ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index 06c3f75587d62..efd07cceae09b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -533,6 +533,9 @@ public AssignmentState calculateAssignmentState() { if (assignmentState.equals(AssignmentState.STOPPING)) { return assignmentState; } + if (taskParams.getNumberOfAllocations() == 0) { + return AssignmentState.STARTED; + } if (nodeRoutingTable.values().stream().anyMatch(r -> r.getState().equals(RoutingState.STARTED))) { return AssignmentState.STARTED; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index c504ebe56ed45..41fd3c6938dfc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -206,7 +206,7 @@ public RoleMapperExpression getExpression() { * that match the {@link #getExpression() expression} in this mapping. */ public List getRoles() { - return Collections.unmodifiableList(roles); + return roles != null ? Collections.unmodifiableList(roles) : Collections.emptyList(); } /** @@ -214,7 +214,7 @@ public List getRoles() { * that should be assigned to users that match the {@link #getExpression() expression} in this mapping. */ public List getRoleTemplates() { - return Collections.unmodifiableList(roleTemplates); + return roleTemplates != null ? Collections.unmodifiableList(roleTemplates) : Collections.emptyList(); } /** @@ -223,7 +223,7 @@ public List getRoleTemplates() { * This is not used within the mapping process, and does not affect whether the expression matches, nor which roles are assigned. */ public Map getMetadata() { - return Collections.unmodifiableMap(metadata); + return metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap(); } /** @@ -233,6 +233,15 @@ public boolean isEnabled() { return enabled; } + /** + * Whether this mapping is an operator defined/read only role mapping + */ + public boolean isReadOnly() { + return metadata != null && metadata.get(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG) instanceof Boolean readOnly + ? 
readOnly + : false; + } + @Override public String toString() { return getClass().getSimpleName() + "<" + name + " ; " + roles + "/" + roleTemplates + " = " + Strings.toString(expression) + ">"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java index 74c6223b1ebdd..31fe86ca77edd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -191,6 +191,14 @@ public static boolean hasFallbackName(ExpressionRoleMapping expressionRoleMappin return expressionRoleMapping.getName().equals(FALLBACK_NAME); } + /** + * Check if any of the role mappings have a fallback name + * @return true if any role mappings have the fallback name + */ + public boolean hasAnyMappingWithFallbackName() { + return roleMappings.stream().anyMatch(RoleMappingMetadata::hasFallbackName); + } + /** * Parse a role mapping from XContent, restoring the name from a reserved metadata field. * Used to parse a role mapping annotated with its name in metadata via @see {@link #copyWithNameInMetadata(ExpressionRoleMapping)}. diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java index c3b6e0089b4ae..dc0a8b52e585a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentTests.java @@ -172,6 +172,11 @@ public void testCalculateAssignmentState_GivenNoStartedAssignments() { assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTING)); } + public void testCalculateAssignmentState_GivenNumAllocationsIsZero() { + TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(0), null); + assertThat(builder.calculateAssignmentState(), equalTo(AssignmentState.STARTED)); + } + public void testCalculateAssignmentState_GivenOneStartedAssignment() { TrainedModelAssignment.Builder builder = TrainedModelAssignment.Builder.empty(randomTaskParams(5), null); builder.addRoutingEntry("node-1", new RoutingInfo(4, 4, RoutingState.STARTING, "")); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index fdd854e7a9673..9e36055e917a6 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -204,6 +204,29 @@ public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } + // Wait for any file metadata + public static Tuple setupClusterStateListener(String node) { + ClusterService clusterService = internalCluster().clusterService(node); + CountDownLatch savedClusterState = new CountDownLatch(1); + AtomicLong metadataVersion = new AtomicLong(-1); + clusterService.addListener(new 
ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + ReservedStateMetadata reservedState = event.state().metadata().reservedStateMetadata().get(FileSettingsService.NAMESPACE); + if (reservedState != null) { + ReservedStateHandlerMetadata handlerMetadata = reservedState.handlers().get(ReservedRoleMappingAction.NAME); + if (handlerMetadata != null) { + clusterService.removeListener(this); + metadataVersion.set(event.state().metadata().version()); + savedClusterState.countDown(); + } + } + } + }); + + return new Tuple<>(savedClusterState, metadataVersion); + } + public static Tuple setupClusterStateListenerForCleanup(String node) { ClusterService clusterService = internalCluster().clusterService(node); CountDownLatch savedClusterState = new CountDownLatch(1); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java index 42b807b5f045b..3a9194ee2eac1 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/license/LicensingTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.security.LocalStateSecurity; import org.hamcrest.Matchers; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import java.nio.file.Files; @@ -241,6 +242,7 @@ public void testNoWarningHeaderWhenAuthenticationFailed() throws Exception { Header[] headers = null; try { getRestClient().performRequest(request); + Assert.fail("expected response exception"); } catch (ResponseException e) { headers = e.getResponse().getHeaders(); List afterWarningHeaders = getWarningHeaders(headers); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java new file mode 100644 index 0000000000000..63c510062bdad --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/support/CleanupRoleMappingDuplicatesMigrationIT.java @@ -0,0 +1,417 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.reservedstate.service.FileSettingsService; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListener; +import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_DATA_KEY; +import static org.elasticsearch.xpack.core.security.action.UpdateIndexMigrationVersionAction.MIGRATION_VERSION_CUSTOM_KEY; +import static org.elasticsearch.xpack.core.security.test.TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) +public class CleanupRoleMappingDuplicatesMigrationIT extends SecurityIntegTestCase { + + private final AtomicLong versionCounter = new AtomicLong(1); + + @Before + public void resetVersion() { + versionCounter.set(1); + } + + private static final String TEST_JSON_WITH_ROLE_MAPPINGS = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "everyone_kibana_alone": { + "enabled": true, + "roles": [ "kibana_user" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something" + } + }, + "everyone_fleet_alone": { + 
"enabled": false, + "roles": [ "fleet_user" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo": "something_else" + } + } + } + } + }"""; + + private static final String TEST_JSON_WITH_FALLBACK_NAME = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "name_not_available_after_deserialization": { + "enabled": true, + "roles": [ "kibana_user", "kibana_admin" ], + "rules": { "field": { "username": "*" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something" + } + } + } + } + }"""; + + private static final String TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS = """ + { + "metadata": { + "version": "%s", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": {} + } + }"""; + + public void testMigrationSuccessful() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + createNativeRoleMapping("dont_clean_this_up"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone", "dont_clean_this_up"); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. 
If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "dont_clean_this_up" + ); + } + + public void testMigrationSuccessfulNoOverlap() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("some_native_mapping"); + createNativeRoleMapping("some_other_native_mapping"); + assertAllRoleMappings("some_native_mapping", "some_other_native_mapping"); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "some_native_mapping", + "some_other_native_mapping" + ); + } + + public void testMigrationSuccessfulNoNative() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + // Then delete it to test an empty role mapping store + createNativeRoleMapping("some_native_mapping"); + deleteNativeRoleMapping("some_native_mapping"); + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode, "everyone_kibana_alone"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. 
If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings( + "everyone_kibana_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone_fleet_alone" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX + ); + } + + public void testMigrationFallbackNamePreCondition() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + // Wait for file watcher to start + awaitFileSettingsWatcher(); + + // Setup listener to wait for role mapping + var nameNotAvailableListener = setupClusterStateListener(masterNode, "name_not_available_after_deserialization"); + // Write role mappings with fallback name, this should block any security migration + writeJSONFile(masterNode, TEST_JSON_WITH_FALLBACK_NAME, logger, versionCounter); + assertTrue(nameNotAvailableListener.v1().await(20, TimeUnit.SECONDS)); + + // Create a native role mapping to create security index and trigger migration + createNativeRoleMapping("everyone_fleet_alone"); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION - 1); + + // Make sure migration didn't run yet (blocked by the fallback name) + assertMigrationLessThan(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + SecurityIndexManager.RoleMappingsCleanupMigrationStatus status = SecurityIndexManager.getRoleMappingsCleanupMigrationStatus( + clusterService.state(), + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION - 1 + ); + assertThat(status, equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY)); + + // Write file without fallback name in it to unblock migration + writeJSONFile(masterNode, TEST_JSON_WITH_ROLE_MAPPINGS, logger, versionCounter); + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + } + + public void testSkipMigrationNoFileBasedMappings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. 
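The fallback-name test above exercises the new pre-condition gate: a migration only becomes eligible once its checkPreConditions hook is satisfied, and the hook defaults to true so existing migrations are unaffected. A hypothetical, self-contained sketch of that gating idea (all names invented for illustration, not the production types) looks like this:

import java.util.List;

final class PreconditionGateSketch {

    // Hypothetical snapshot of the state a migration may inspect before running.
    record State(boolean roleMappingsReady, int mappingVersion) {}

    interface Migration {
        void run();

        int minMappingVersion();

        // Existing migrations keep working unchanged because the hook defaults to "no extra conditions".
        default boolean checkPreConditions(State state) {
            return true;
        }
    }

    static boolean isEligible(Migration migration, State state) {
        return state.mappingVersion() >= migration.minMappingVersion() && migration.checkPreConditions(state);
    }

    static void runEligible(List<Migration> migrations, State state) {
        for (Migration migration : migrations) {
            if (isEligible(migration, state)) {
                migration.run();
            }
            // Ineligible migrations are simply skipped and re-evaluated on the next cluster state change.
        }
    }
}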
If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + } + + public void testSkipMigrationEmptyFileBasedMappings() throws Exception { + internalCluster().setBootstrapMasterNodeIndex(0); + ensureGreen(); + final String masterNode = internalCluster().getMasterName(); + + // Wait for file watcher to start + awaitFileSettingsWatcher(); + // Setup listener to wait for any role mapping + var fileBasedRoleMappingsWrittenListener = setupClusterStateListener(masterNode); + // Write role mappings + writeJSONFile(masterNode, TEST_JSON_WITH_EMPTY_ROLE_MAPPINGS, logger, versionCounter); + assertTrue(fileBasedRoleMappingsWrittenListener.v1().await(20, TimeUnit.SECONDS)); + + // Create a native role mapping to create security index and trigger migration (skipped initially) + createNativeRoleMapping("everyone_kibana_alone"); + createNativeRoleMapping("everyone_fleet_alone"); + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + // First migration is on a new index, so should skip all migrations. If we reset, it should re-trigger and run all migrations + resetMigration(); + + // Wait for the first migration to finish + waitForMigrationCompletion(SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION); + + assertAllRoleMappings("everyone_kibana_alone", "everyone_fleet_alone"); + } + + public void testNewIndexSkipMigration() { + internalCluster().setBootstrapMasterNodeIndex(0); + final String masterNode = internalCluster().getMasterName(); + ensureGreen(); + CountDownLatch awaitMigrations = awaitMigrationVersionUpdates( + masterNode, + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION + ); + // Create a native role mapping to create security index and trigger migration + createNativeRoleMapping("everyone_kibana_alone"); + // Make sure no migration ran (set to current version without applying prior migrations) + safeAwait(awaitMigrations); + } + + /** + * Make sure all versions are applied to cluster state sequentially + */ + private CountDownLatch awaitMigrationVersionUpdates(String node, final int... versions) { + final ClusterService clusterService = internalCluster().clusterService(node); + final CountDownLatch allVersionsCountDown = new CountDownLatch(1); + final AtomicInteger currentVersionIdx = new AtomicInteger(0); + clusterService.addListener(new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + int currentMigrationVersion = getCurrentMigrationVersion(event.state()); + if (currentMigrationVersion > 0) { + assertThat(versions[currentVersionIdx.get()], lessThanOrEqualTo(currentMigrationVersion)); + if (versions[currentVersionIdx.get()] == currentMigrationVersion) { + currentVersionIdx.incrementAndGet(); + } + + if (currentVersionIdx.get() >= versions.length) { + clusterService.removeListener(this); + allVersionsCountDown.countDown(); + } + } + } + }); + + return allVersionsCountDown; + } + + private void assertAllRoleMappings(String... 
roleMappingNames) { + GetRoleMappingsResponse response = client().execute(GetRoleMappingsAction.INSTANCE, new GetRoleMappingsRequest()).actionGet(); + + assertTrue(response.hasMappings()); + assertThat(response.mappings().length, equalTo(roleMappingNames.length)); + + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + roleMappingNames + + ) + ); + } + + private void awaitFileSettingsWatcher() throws Exception { + final String masterNode = internalCluster().getMasterName(); + FileSettingsService masterFileSettingsService = internalCluster().getInstance(FileSettingsService.class, masterNode); + assertBusy(() -> assertTrue(masterFileSettingsService.watching())); + } + + private void resetMigration() { + client().execute( + UpdateIndexMigrationVersionAction.INSTANCE, + // -1 is a hack, since running a migration on version 0 on a new cluster will cause all migrations to be skipped (not needed) + new UpdateIndexMigrationVersionAction.Request(TimeValue.MAX_VALUE, -1, INTERNAL_SECURITY_MAIN_INDEX_7) + ).actionGet(); + } + + private void createNativeRoleMapping(String name) { + PutRoleMappingRequest request = new PutRoleMappingRequest(); + request.setName(name); + request.setRules(new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*")))); + request.setRoles(List.of("superuser")); + + ActionFuture response = client().execute(PutRoleMappingAction.INSTANCE, request); + response.actionGet(); + } + + private void deleteNativeRoleMapping(String name) { + DeleteRoleMappingRequest request = new DeleteRoleMappingRequest(); + request.setName(name); + + ActionFuture response = client().execute(DeleteRoleMappingAction.INSTANCE, request); + response.actionGet(); + } + + private void assertMigrationVersionAtLeast(int expectedVersion) { + assertThat(getCurrentMigrationVersion(), greaterThanOrEqualTo(expectedVersion)); + } + + private void assertMigrationLessThan(int expectedVersion) { + assertThat(getCurrentMigrationVersion(), lessThan(expectedVersion)); + } + + private int getCurrentMigrationVersion(ClusterState state) { + IndexMetadata indexMetadata = state.metadata().getIndices().get(INTERNAL_SECURITY_MAIN_INDEX_7); + if (indexMetadata == null || indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY) == null) { + return 0; + } + return Integer.parseInt(indexMetadata.getCustomData(MIGRATION_VERSION_CUSTOM_KEY).get(MIGRATION_VERSION_CUSTOM_DATA_KEY)); + } + + private int getCurrentMigrationVersion() { + ClusterService clusterService = internalCluster().getInstance(ClusterService.class); + return getCurrentMigrationVersion(clusterService.state()); + } + + private void waitForMigrationCompletion(int version) throws Exception { + assertBusy(() -> assertMigrationVersionAtLeast(version)); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java index c1fe553f41334..d0292f32cd75f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatures.java @@ -17,13 +17,14 @@ import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MIGRATION_FRAMEWORK; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_PROFILE_ORIGIN_FEATURE; import static 
org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.VERSION_SECURITY_PROFILE_ORIGIN; public class SecurityFeatures implements FeatureSpecification { @Override public Set getFeatures() { - return Set.of(SECURITY_ROLES_METADATA_FLATTENED, SECURITY_MIGRATION_FRAMEWORK); + return Set.of(SECURITY_ROLE_MAPPING_CLEANUP, SECURITY_ROLES_METADATA_FLATTENED, SECURITY_MIGRATION_FRAMEWORK); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 6d9b0ef6aeebe..12ef800a7aae7 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.TimeValue; @@ -46,7 +47,9 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; import org.elasticsearch.xpack.security.SecurityFeatures; +import org.elasticsearch.xpack.security.action.rolemapping.ReservedRoleMappingAction; import java.time.Instant; import java.util.List; @@ -75,7 +78,7 @@ public class SecurityIndexManager implements ClusterStateListener { public static final String SECURITY_VERSION_STRING = "security-version"; - + protected static final String FILE_SETTINGS_METADATA_NAMESPACE = "file_settings"; private static final Logger logger = LogManager.getLogger(SecurityIndexManager.class); /** @@ -86,6 +89,13 @@ public enum Availability { PRIMARY_SHARDS } + public enum RoleMappingsCleanupMigrationStatus { + READY, + NOT_READY, + SKIP, + DONE + } + private final Client client; private final SystemIndexDescriptor systemIndexDescriptor; @@ -195,10 +205,6 @@ public boolean isMigrationsVersionAtLeast(Integer expectedMigrationsVersion) { return indexExists() && this.state.migrationsVersion.compareTo(expectedMigrationsVersion) >= 0; } - public boolean isCreatedOnLatestVersion() { - return this.state.createdOnLatestVersion; - } - public ElasticsearchException getUnavailableReason(Availability availability) { // ensure usage of a local copy so all checks execute against the same state! if (defensiveCopy == false) { @@ -261,6 +267,7 @@ private SystemIndexDescriptor.MappingsVersion getMinSecurityIndexMappingVersion( /** * Check if the index was created on the latest index version available in the cluster */ + private static boolean isCreatedOnLatestVersion(IndexMetadata indexMetadata) { final IndexVersion indexVersionCreated = indexMetadata != null ? 
SETTING_INDEX_VERSION_CREATED.get(indexMetadata.getSettings()) @@ -268,6 +275,50 @@ private static boolean isCreatedOnLatestVersion(IndexMetadata indexMetadata) { return indexVersionCreated != null && indexVersionCreated.onOrAfter(IndexVersion.current()); } + /** + * Check if a role mappings cleanup migration is needed or has already been performed and if the cluster is ready for a cleanup + * migration + * + * @param clusterState current cluster state + * @param migrationsVersion current migration version + * + * @return RoleMappingsCleanupMigrationStatus + */ + static RoleMappingsCleanupMigrationStatus getRoleMappingsCleanupMigrationStatus(ClusterState clusterState, int migrationsVersion) { + // Migration already finished + if (migrationsVersion >= SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION) { + return RoleMappingsCleanupMigrationStatus.DONE; + } + + ReservedStateMetadata fileSettingsMetadata = clusterState.metadata().reservedStateMetadata().get(FILE_SETTINGS_METADATA_NAMESPACE); + boolean hasFileSettingsMetadata = fileSettingsMetadata != null; + // If there is no fileSettingsMetadata, there should be no reserved state (this is to catch bugs related to + // name changes to FILE_SETTINGS_METADATA_NAMESPACE) + assert hasFileSettingsMetadata || clusterState.metadata().reservedStateMetadata().isEmpty() + : "ReservedStateMetadata contains unknown namespace"; + + // If no file based role mappings available -> migration not needed + if (hasFileSettingsMetadata == false || fileSettingsMetadata.keys(ReservedRoleMappingAction.NAME).isEmpty()) { + return RoleMappingsCleanupMigrationStatus.SKIP; + } + + RoleMappingMetadata roleMappingMetadata = RoleMappingMetadata.getFromClusterState(clusterState); + + // If there are file based role mappings, make sure they have the latest format (name available) and that they have all been + // synced to cluster state (same size as the reserved state keys) + if (roleMappingMetadata.getRoleMappings().size() == fileSettingsMetadata.keys(ReservedRoleMappingAction.NAME).size() + && roleMappingMetadata.hasAnyMappingWithFallbackName() == false) { + return RoleMappingsCleanupMigrationStatus.READY; + } + + // If none of the above conditions are met, wait for a state change to re-evaluate if the cluster is ready for migration + return RoleMappingsCleanupMigrationStatus.NOT_READY; + } + + public RoleMappingsCleanupMigrationStatus getRoleMappingsCleanupMigrationStatus() { + return state.roleMappingsCleanupMigrationStatus; + } + @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { @@ -285,8 +336,12 @@ public void clusterChanged(ClusterChangedEvent event) { Tuple available = checkIndexAvailable(event.state()); final boolean indexAvailableForWrite = available.v1(); final boolean indexAvailableForSearch = available.v2(); - final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); final int migrationsVersion = getMigrationVersionFromIndexMetadata(indexMetadata); + final RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus = getRoleMappingsCleanupMigrationStatus( + event.state(), + migrationsVersion + ); + final boolean mappingIsUpToDate = indexMetadata == null || checkIndexMappingUpToDate(event.state()); final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion = getMinSecurityIndexMappingVersion(event.state()); final int indexMappingVersion = 
loadIndexMappingVersion(systemIndexDescriptor.getAliasName(), event.state()); final String concreteIndexName = indexMetadata == null @@ -315,6 +370,7 @@ public void clusterChanged(ClusterChangedEvent event) { indexAvailableForWrite, mappingIsUpToDate, createdOnLatestVersion, + roleMappingsCleanupMigrationStatus, migrationsVersion, minClusterMappingVersion, indexMappingVersion, @@ -474,7 +530,8 @@ private Tuple checkIndexAvailable(ClusterState state) { public boolean isEligibleSecurityMigration(SecurityMigrations.SecurityMigration securityMigration) { return state.securityFeatures.containsAll(securityMigration.nodeFeaturesRequired()) - && state.indexMappingVersion >= securityMigration.minMappingVersion(); + && state.indexMappingVersion >= securityMigration.minMappingVersion() + && securityMigration.checkPreConditions(state); } public boolean isReadyForSecurityMigration(SecurityMigrations.SecurityMigration securityMigration) { @@ -680,6 +737,10 @@ public void onFailure(Exception e) { } } + public boolean isCreatedOnLatestVersion() { + return state.createdOnLatestVersion; + } + /** * Return true if the state moves from an unhealthy ("RED") index state to a healthy ("non-RED") state. */ @@ -714,6 +775,7 @@ public static class State { null, null, null, + null, Set.of() ); public final Instant creationTime; @@ -722,6 +784,7 @@ public static class State { public final boolean indexAvailableForWrite; public final boolean mappingUpToDate; public final boolean createdOnLatestVersion; + public final RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus; public final Integer migrationsVersion; // Min mapping version supported by the descriptors in the cluster public final SystemIndexDescriptor.MappingsVersion minClusterMappingVersion; @@ -740,6 +803,7 @@ public State( boolean indexAvailableForWrite, boolean mappingUpToDate, boolean createdOnLatestVersion, + RoleMappingsCleanupMigrationStatus roleMappingsCleanupMigrationStatus, Integer migrationsVersion, SystemIndexDescriptor.MappingsVersion minClusterMappingVersion, Integer indexMappingVersion, @@ -756,6 +820,7 @@ public State( this.mappingUpToDate = mappingUpToDate; this.migrationsVersion = migrationsVersion; this.createdOnLatestVersion = createdOnLatestVersion; + this.roleMappingsCleanupMigrationStatus = roleMappingsCleanupMigrationStatus; this.minClusterMappingVersion = minClusterMappingVersion; this.indexMappingVersion = indexMappingVersion; this.concreteIndexName = concreteIndexName; @@ -776,6 +841,7 @@ public boolean equals(Object o) { && indexAvailableForWrite == state.indexAvailableForWrite && mappingUpToDate == state.mappingUpToDate && createdOnLatestVersion == state.createdOnLatestVersion + && roleMappingsCleanupMigrationStatus == state.roleMappingsCleanupMigrationStatus && Objects.equals(indexMappingVersion, state.indexMappingVersion) && Objects.equals(migrationsVersion, state.migrationsVersion) && Objects.equals(minClusterMappingVersion, state.minClusterMappingVersion) @@ -798,6 +864,7 @@ public int hashCode() { indexAvailableForWrite, mappingUpToDate, createdOnLatestVersion, + roleMappingsCleanupMigrationStatus, migrationsVersion, minClusterMappingVersion, indexMappingVersion, @@ -822,6 +889,8 @@ public String toString() { + mappingUpToDate + ", createdOnLatestVersion=" + createdOnLatestVersion + + ", roleMappingsCleanupMigrationStatus=" + + roleMappingsCleanupMigrationStatus + ", migrationsVersion=" + migrationsVersion + ", minClusterMappingVersion=" diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java index 5cd8cba763d3d..203dec9e25b91 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityMigrations.java @@ -11,6 +11,8 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.BoolQueryBuilder; @@ -20,20 +22,35 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; +import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.RoleMappingsCleanupMigrationStatus.SKIP; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_MANAGE_ROLES_PRIVILEGE; import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SecurityMainIndexMappingVersion.ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS; -/** - * Interface for creating SecurityMigrations that will be automatically applied once to existing .security indices - * IMPORTANT: A new index version needs to be added to {@link org.elasticsearch.index.IndexVersions} for the migration to be triggered - */ public class SecurityMigrations { + /** + * Interface for creating SecurityMigrations that will be automatically applied once to existing .security indices + * IMPORTANT: A new index version needs to be added to {@link org.elasticsearch.index.IndexVersions} for the migration to be triggered + */ public interface SecurityMigration { /** * Method that will execute the actual migration - needs to be idempotent and non-blocking @@ -52,6 +69,16 @@ public interface SecurityMigration { */ Set nodeFeaturesRequired(); + /** + * Check that any pre-conditions are met before launching migration + * + * @param securityIndexManagerState current state of the security index + * @return true if pre-conditions met, otherwise 
false + */ + default boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + return true; + } + /** * The min mapping version required to support this migration. This makes sure that the index has at least the min mapping that is * required to support the migration. @@ -62,63 +89,163 @@ public interface SecurityMigration { } public static final Integer ROLE_METADATA_FLATTENED_MIGRATION_VERSION = 1; + public static final Integer CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION = 2; + private static final Logger logger = LogManager.getLogger(SecurityMigration.class); public static final TreeMap MIGRATIONS_BY_VERSION = new TreeMap<>( - Map.of(ROLE_METADATA_FLATTENED_MIGRATION_VERSION, new SecurityMigration() { - private static final Logger logger = LogManager.getLogger(SecurityMigration.class); - - @Override - public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { - BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) - .mustNot(QueryBuilders.existsQuery("metadata_flattened")); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); - SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); - countRequest.source(searchSourceBuilder); - - client.search(countRequest, ActionListener.wrap(response -> { - // If there are no roles, skip migration - if (response.getHits().getTotalHits().value() > 0) { - logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value() + "] roles"); - updateRolesByQuery(indexManager, client, filterQuery, listener); - } else { - listener.onResponse(null); - } + Map.of( + ROLE_METADATA_FLATTENED_MIGRATION_VERSION, + new RoleMetadataFlattenedMigration(), + CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION, + new CleanupRoleMappingDuplicatesMigration() + ) + ); + + public static class RoleMetadataFlattenedMigration implements SecurityMigration { + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + BoolQueryBuilder filterQuery = new BoolQueryBuilder().filter(QueryBuilders.termQuery("type", "role")) + .mustNot(QueryBuilders.existsQuery("metadata_flattened")); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().query(filterQuery).size(0).trackTotalHits(true); + SearchRequest countRequest = new SearchRequest(indexManager.getConcreteIndexName()); + countRequest.source(searchSourceBuilder); + + client.search(countRequest, ActionListener.wrap(response -> { + // If there are no roles, skip migration + if (response.getHits().getTotalHits().value() > 0) { + logger.info("Preparing to migrate [" + response.getHits().getTotalHits().value() + "] roles"); + updateRolesByQuery(indexManager, client, filterQuery, listener); + } else { + listener.onResponse(null); + } + }, listener::onFailure)); + } + + private void updateRolesByQuery( + SecurityIndexManager indexManager, + Client client, + BoolQueryBuilder filterQuery, + ActionListener listener + ) { + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); + updateByQueryRequest.setQuery(filterQuery); + updateByQueryRequest.setScript( + new Script(ScriptType.INLINE, "painless", "ctx._source.metadata_flattened = ctx._source.metadata", Collections.emptyMap()) + ); + client.admin() + .cluster() + .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, 
ActionListener.wrap(bulkByScrollResponse -> { + logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); + listener.onResponse(null); }, listener::onFailure)); + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); + } + + @Override + public int minMappingVersion() { + return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); + } + } + + public static class CleanupRoleMappingDuplicatesMigration implements SecurityMigration { + @Override + public void migrate(SecurityIndexManager indexManager, Client client, ActionListener listener) { + if (indexManager.getRoleMappingsCleanupMigrationStatus() == SKIP) { + listener.onResponse(null); + return; } + assert indexManager.getRoleMappingsCleanupMigrationStatus() == READY; - private void updateRolesByQuery( - SecurityIndexManager indexManager, - Client client, - BoolQueryBuilder filterQuery, - ActionListener listener - ) { - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(indexManager.getConcreteIndexName()); - updateByQueryRequest.setQuery(filterQuery); - updateByQueryRequest.setScript( - new Script( - ScriptType.INLINE, - "painless", - "ctx._source.metadata_flattened = ctx._source.metadata", - Collections.emptyMap() - ) + getRoleMappings(client, ActionListener.wrap(roleMappings -> { + List roleMappingsToDelete = getDuplicateRoleMappingNames(roleMappings.mappings()); + if (roleMappingsToDelete.isEmpty() == false) { + logger.info("Found [" + roleMappingsToDelete.size() + "] role mapping(s) to cleanup in .security index."); + deleteNativeRoleMappings(client, roleMappingsToDelete, listener); + } else { + listener.onResponse(null); + } + }, listener::onFailure)); + } + + private void getRoleMappings(Client client, ActionListener listener) { + executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + GetRoleMappingsAction.INSTANCE, + new GetRoleMappingsRequestBuilder(client).request(), + listener + ); + } + + private void deleteNativeRoleMappings(Client client, List names, ActionListener listener) { + assert names.isEmpty() == false; + ActionListener groupListener = new GroupedActionListener<>( + names.size(), + ActionListener.wrap(responses -> { + long foundRoleMappings = responses.stream().filter(DeleteRoleMappingResponse::isFound).count(); + if (responses.size() > foundRoleMappings) { + logger.warn( + "[" + (responses.size() - foundRoleMappings) + "] Role mapping(s) not found during role mapping clean up." 
+ ); + } + if (foundRoleMappings > 0) { + logger.info("Deleted [" + foundRoleMappings + "] duplicated role mapping(s) from .security index"); + } + listener.onResponse(null); + }, listener::onFailure) + ); + + for (String name : names) { + executeAsyncWithOrigin( + client, + SECURITY_ORIGIN, + DeleteRoleMappingAction.INSTANCE, + new DeleteRoleMappingRequestBuilder(client).name(name).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).request(), + groupListener ); - client.admin() - .cluster() - .execute(UpdateByQueryAction.INSTANCE, updateByQueryRequest, ActionListener.wrap(bulkByScrollResponse -> { - logger.info("Migrated [" + bulkByScrollResponse.getTotal() + "] roles"); - listener.onResponse(null); - }, listener::onFailure)); } - @Override - public Set nodeFeaturesRequired() { - return Set.of(SecuritySystemIndices.SECURITY_ROLES_METADATA_FLATTENED); - } + } - @Override - public int minMappingVersion() { - return ADD_REMOTE_CLUSTER_AND_DESCRIPTION_FIELDS.id(); - } - }) - ); + @Override + public boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + // Block migration until expected role mappings are in cluster state and in the correct format or skip if no role mappings + // are expected + return securityIndexManagerState.roleMappingsCleanupMigrationStatus == READY + || securityIndexManagerState.roleMappingsCleanupMigrationStatus == SKIP; + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(SecuritySystemIndices.SECURITY_ROLE_MAPPING_CLEANUP); + } + + @Override + public int minMappingVersion() { + return ADD_MANAGE_ROLES_PRIVILEGE.id(); + } + + // Visible for testing + protected static List getDuplicateRoleMappingNames(ExpressionRoleMapping... roleMappings) { + // Partition role mappings on if they're cluster state role mappings (true) or native role mappings (false) + Map> partitionedRoleMappings = Arrays.stream(roleMappings) + .collect(Collectors.partitioningBy(ExpressionRoleMapping::isReadOnly)); + + Set clusterStateRoleMappings = partitionedRoleMappings.get(true) + .stream() + .map(ExpressionRoleMapping::getName) + .map(ExpressionRoleMapping::removeReadOnlySuffixIfPresent) + .collect(Collectors.toSet()); + + return partitionedRoleMappings.get(false) + .stream() + .map(ExpressionRoleMapping::getName) + .filter(clusterStateRoleMappings::contains) + .toList(); + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java index 36ea14c6e101b..77c7d19e94a9b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecuritySystemIndices.java @@ -61,6 +61,7 @@ public class SecuritySystemIndices { public static final NodeFeature SECURITY_PROFILE_ORIGIN_FEATURE = new NodeFeature("security.security_profile_origin"); public static final NodeFeature SECURITY_MIGRATION_FRAMEWORK = new NodeFeature("security.migration_framework"); public static final NodeFeature SECURITY_ROLES_METADATA_FLATTENED = new NodeFeature("security.roles_metadata_flattened"); + public static final NodeFeature SECURITY_ROLE_MAPPING_CLEANUP = new NodeFeature("security.role_mapping_cleanup"); /** * Security managed index mappings used to be updated based on the product version. 
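The duplicate detection added in getDuplicateRoleMappingNames above boils down to: partition the mappings into operator (cluster-state) and native ones, normalise the operator names by stripping the read-only suffix, and report every native name that collides. Here is a small stand-alone sketch of that stream idiom, with an invented Mapping record in place of ExpressionRoleMapping and an assumed suffix value used purely for illustration.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

final class DuplicateNamesSketch {

    // Invented stand-in for ExpressionRoleMapping, carrying just what the idiom needs.
    record Mapping(String name, boolean readOnly) {}

    static final String READ_ONLY_SUFFIX = "-read-only-operator-mapping"; // assumed suffix, for illustration only

    static List<String> duplicateNativeNames(Mapping... mappings) {
        // true -> operator/cluster-state mappings, false -> native mappings
        Map<Boolean, List<Mapping>> partitioned = Arrays.stream(mappings).collect(Collectors.partitioningBy(Mapping::readOnly));

        Set<String> operatorNames = partitioned.get(true)
            .stream()
            .map(Mapping::name)
            .map(name -> name.endsWith(READ_ONLY_SUFFIX) ? name.substring(0, name.length() - READ_ONLY_SUFFIX.length()) : name)
            .collect(Collectors.toSet());

        // A native mapping is a duplicate when an operator mapping claims the same (normalised) name.
        return partitioned.get(false).stream().map(Mapping::name).filter(operatorNames::contains).toList();
    }
}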
They are now updated based on per-index mappings diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java index 9a3c9c847d131..55a3dbcdfde95 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilter.java @@ -10,6 +10,7 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.ipfilter.AbstractRemoteAddressFilter; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.xpack.security.transport.filter.IPFilter; import java.net.InetSocketAddress; @@ -19,16 +20,21 @@ class IpFilterRemoteAddressFilter extends AbstractRemoteAddressFilter listener) { + listener.onResponse(null); + } + + @Override + public Set nodeFeaturesRequired() { + return Set.of(); + } + + @Override + public int minMappingVersion() { + return 0; + } + + @Override + public boolean checkPreConditions(SecurityIndexManager.State securityIndexManagerState) { + return false; + } + })); + } + + private ClusterState.Builder clusterStateBuilderForMigrationTesting() { + return createClusterState( + TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7, + SecuritySystemIndices.SECURITY_MAIN_ALIAS, + IndexMetadata.State.OPEN + ); + } + + public void testGetRoleMappingsCleanupMigrationStatus() { + { + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus( + clusterStateBuilderForMigrationTesting().build(), + SecurityMigrations.CLEANUP_ROLE_MAPPING_DUPLICATES_MIGRATION_VERSION + ), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.DONE) + ); + } + { + // Migration should be skipped + ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + metadataBuilder.put(ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE).build()); + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.SKIP) + ); + } + { + // Not ready for migration + ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // No role mappings in cluster state yet + metadataBuilder.putCustom(RoleMappingMetadata.TYPE, new RoleMappingMetadata(Set.of())); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY) + ); + } + { + // Old role mappings in cluster state + final ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + 
ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // Role mappings in cluster state with fallback name + metadataBuilder.putCustom( + RoleMappingMetadata.TYPE, + new RoleMappingMetadata(Set.of(new ExpressionRoleMapping(RoleMappingMetadata.FALLBACK_NAME, null, null, null, null, true))) + ); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.NOT_READY) + ); + } + { + // Ready for migration + final ClusterState.Builder clusterStateBuilder = clusterStateBuilderForMigrationTesting(); + Metadata.Builder metadataBuilder = new Metadata.Builder(); + ReservedStateMetadata.Builder builder = ReservedStateMetadata.builder(FILE_SETTINGS_METADATA_NAMESPACE); + // File settings role mappings exist + ReservedStateHandlerMetadata reservedStateHandlerMetadata = new ReservedStateHandlerMetadata( + ReservedRoleMappingAction.NAME, + Set.of("role_mapping_1") + ); + builder.putHandler(reservedStateHandlerMetadata); + metadataBuilder.put(builder.build()); + + // Role mappings in cluster state + metadataBuilder.putCustom( + RoleMappingMetadata.TYPE, + new RoleMappingMetadata(Set.of(new ExpressionRoleMapping("role_mapping_1", null, null, null, null, true))) + ); + + assertThat( + SecurityIndexManager.getRoleMappingsCleanupMigrationStatus(clusterStateBuilder.metadata(metadataBuilder).build(), 1), + equalTo(SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY) + ); + } + } + public void testProcessClosedIndexState() { // Index initially exists final ClusterState.Builder indexAvailable = createClusterState( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java new file mode 100644 index 0000000000000..3d3cc47b55cf6 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityMigrationsTests.java @@ -0,0 +1,174 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.security.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; +import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.junit.After; +import org.junit.Before; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SecurityMigrationsTests extends ESTestCase { + private ThreadPool threadPool; + private Client client; + + public void testGetDuplicateRoleMappingNames() { + assertThat(SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames(), empty()); + assertThat( + SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2") + ), + empty() + ); + assertThat( + SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping1") + ), + equalTo(List.of("roleMapping1")) + ); + + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2") + ); + assertThat(duplicates, hasSize(2)); + assertThat(duplicates, containsInAnyOrder("roleMapping1", "roleMapping2")); + } + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1"), + nativeRoleMapping("roleMapping2"), + nativeRoleMapping("roleMapping3"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping4") + ); + assertThat(duplicates, hasSize(2)); + assertThat(duplicates, containsInAnyOrder("roleMapping1", "roleMapping2")); + } + { + List duplicates = SecurityMigrations.CleanupRoleMappingDuplicatesMigration.getDuplicateRoleMappingNames( + nativeRoleMapping("roleMapping1" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX), + nativeRoleMapping("roleMapping2" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX), + nativeRoleMapping("roleMapping3"), + reservedRoleMapping("roleMapping1"), + reservedRoleMapping("roleMapping2"), + reservedRoleMapping("roleMapping3") + ); + assertThat(duplicates, hasSize(1)); + assertThat(duplicates, 
containsInAnyOrder("roleMapping3")); + } + } + + private static ExpressionRoleMapping reservedRoleMapping(String name) { + return new ExpressionRoleMapping( + name + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + null, + null, + null, + Map.of(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true), + true + ); + } + + private static ExpressionRoleMapping nativeRoleMapping(String name) { + return new ExpressionRoleMapping(name, null, null, null, randomBoolean() ? null : Map.of(), true); + } + + public void testCleanupRoleMappingDuplicatesMigrationPartialFailure() { + // Make sure migration continues even if a duplicate is not found + SecurityIndexManager securityIndexManager = mock(SecurityIndexManager.class); + when(securityIndexManager.getRoleMappingsCleanupMigrationStatus()).thenReturn( + SecurityIndexManager.RoleMappingsCleanupMigrationStatus.READY + ); + doAnswer(inv -> { + final Object[] args = inv.getArguments(); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) args[2]; + listener.onResponse( + new GetRoleMappingsResponse( + nativeRoleMapping("duplicate-0"), + reservedRoleMapping("duplicate-0"), + nativeRoleMapping("duplicate-1"), + reservedRoleMapping("duplicate-1"), + nativeRoleMapping("duplicate-2"), + reservedRoleMapping("duplicate-2") + ) + ); + return null; + }).when(client).execute(eq(GetRoleMappingsAction.INSTANCE), any(GetRoleMappingsRequest.class), any()); + + final boolean[] duplicatesDeleted = new boolean[3]; + doAnswer(inv -> { + final Object[] args = inv.getArguments(); + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) args[2]; + DeleteRoleMappingRequest request = (DeleteRoleMappingRequest) args[1]; + if (request.getName().equals("duplicate-0")) { + duplicatesDeleted[0] = true; + } + if (request.getName().equals("duplicate-1")) { + if (randomBoolean()) { + listener.onResponse(new DeleteRoleMappingResponse(false)); + } else { + listener.onFailure(new IllegalStateException("bad state")); + } + } + if (request.getName().equals("duplicate-2")) { + duplicatesDeleted[2] = true; + } + return null; + }).when(client).execute(eq(DeleteRoleMappingAction.INSTANCE), any(DeleteRoleMappingRequest.class), any()); + + SecurityMigrations.SecurityMigration securityMigration = new SecurityMigrations.CleanupRoleMappingDuplicatesMigration(); + securityMigration.migrate(securityIndexManager, client, ActionListener.noop()); + + assertTrue(duplicatesDeleted[0]); + assertFalse(duplicatesDeleted[1]); + assertTrue(duplicatesDeleted[2]); + } + + @Before + public void createClientAndThreadPool() { + threadPool = new TestThreadPool("cleanup role mappings test pool"); + client = mock(Client.class); + when(client.threadPool()).thenReturn(threadPool); + } + + @After + public void stopThreadPool() { + terminate(threadPool); + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java index 67402bf323ff5..ed28bd383c48e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/IpFilterRemoteAddressFilterTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import 
org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.license.TestUtils; @@ -90,10 +91,11 @@ public void init() throws Exception { ipFilter.setBoundHttpTransportAddress(httpTransport.boundAddress()); } + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); if (isHttpEnabled) { - handler = new IpFilterRemoteAddressFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME); + handler = new IpFilterRemoteAddressFilter(ipFilter, IPFilter.HTTP_PROFILE_NAME, threadContext); } else { - handler = new IpFilterRemoteAddressFilter(ipFilter, "default"); + handler = new IpFilterRemoteAddressFilter(ipFilter, "default", threadContext); } } @@ -106,7 +108,11 @@ public void testThatFilteringWorksByIp() throws Exception { } public void testFilteringWorksForRemoteClusterPort() throws Exception { - handler = new IpFilterRemoteAddressFilter(ipFilter, RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE); + handler = new IpFilterRemoteAddressFilter( + ipFilter, + RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE, + new ThreadContext(Settings.EMPTY) + ); InetSocketAddress localhostAddr = new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 12345); assertThat(handler.accept(mock(ChannelHandlerContext.class), localhostAddr), is(true)); diff --git a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java index 0c4ff1780ae1e..b5f9088edc4be 100644 --- a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java +++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/feature/FeatureFactory.java @@ -307,8 +307,11 @@ private static org.locationtech.jts.geom.Geometry clipGeometry( return null; } } catch (TopologyException ex) { - // we should never get here but just to be super safe because a TopologyException will kill the node - throw new IllegalArgumentException(ex); + // Note: we must never rethrow the TopologyException, as it would kill the node. + // Unfortunately the intersection method is not perfect and throws this error for complex + // geometries even when they are valid. We can still simplify such a geometry, so we return the + // original here and let the simplification process handle it. + return geometry; } } } diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index b9b0531fa5b68..38fbf99068a9b 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -88,6 +88,8 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> keystore 'xpack.watcher.encryption_key', file("${project.projectDir}/src/test/resources/system_key") setting 'xpack.watcher.encrypt_sensitive_data', 'true' + extraConfigFile 'operator/settings.json', file("${project.projectDir}/src/test/resources/operator_defined_role_mappings.json") + // Old versions of the code contain an invalid assertion that trips // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing // the assertion, but this is impossible for released versions.
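The FeatureFactory change above deliberately swallows the TopologyException and falls back to the unclipped geometry. A minimal standalone sketch of that pattern against the JTS API (the class and method names here are illustrative, not the actual FeatureFactory internals):

import org.locationtech.jts.geom.Geometry;
import org.locationtech.jts.geom.TopologyException;

final class ClipFallbackSketch {
    // Clip a geometry to the tile envelope without letting a TopologyException escape:
    // JTS intersection can fail on complex geometries even when they are valid, so fall back
    // to the original geometry and rely on the later simplification step to handle it.
    static Geometry clipOrFallback(Geometry geometry, Geometry tileEnvelope) {
        try {
            return tileEnvelope.intersection(geometry);
        } catch (TopologyException ex) {
            return geometry;
        }
    }
}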
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 4324aed5fee18..b17644cd1c2a9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -9,11 +9,13 @@ import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Booleans; +import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.test.SecuritySettingsSourceField; import org.junit.Before; @@ -21,6 +23,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; public abstract class AbstractUpgradeTestCase extends ESRestTestCase { @@ -149,4 +152,22 @@ public void setupForTests() throws Exception { } }); } + + protected static void waitForSecurityMigrationCompletion(RestClient adminClient, int version) throws Exception { + final Request request = new Request("GET", "_cluster/state/metadata/.security-7"); + assertBusy(() -> { + Map indices = new XContentTestUtils.JsonMapView(entityAsMap(adminClient.performRequest(request))).get( + "metadata.indices" + ); + assertNotNull(indices); + assertTrue(indices.containsKey(".security-7")); + // JsonMapView doesn't support . prefixed indices (splits on .) + @SuppressWarnings("unchecked") + String responseVersion = new XContentTestUtils.JsonMapView((Map) indices.get(".security-7")).get( + "migration_version.version" + ); + assertNotNull(responseVersion); + assertTrue(Integer.parseInt(responseVersion) >= version); + }); + } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java new file mode 100644 index 0000000000000..82d4050c044b1 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRoleMappingCleanupIT.java @@ -0,0 +1,146 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.TransportVersions.V_8_15_0; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.Matchers.containsInAnyOrder; + +public class SecurityIndexRoleMappingCleanupIT extends AbstractUpgradeTestCase { + + public void testCleanupDuplicateMappings() throws Exception { + if (CLUSTER_TYPE == ClusterType.OLD) { + // If we're in a state where the same operator-defined role mappings can exist both in cluster state and the native store + // (the V_8_15_0 transport version is present, but the security.role_mapping_cleanup feature is not yet available), + // create a state where the native store will need to be cleaned up + assumeTrue( + "Cleanup is only needed before the security.role_mapping_cleanup feature is available in the cluster", + clusterHasFeature("security.role_mapping_cleanup") == false + ); + assumeTrue( + "If role mappings are in cluster state but cleanup has not been performed yet, create duplicated role mappings", + minimumTransportVersion().onOrAfter(V_8_15_0) + ); + // Since the old cluster has role mappings in cluster state but doesn't check for duplicates, create duplicates + createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); + createNativeRoleMapping("operator_role_mapping_2", Map.of("meta", "test"), true); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + // Create a native role mapping that doesn't conflict with anything before the migration run + createNativeRoleMapping("no_name_conflict", Map.of("meta", "test")); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + waitForSecurityMigrationCompletion(adminClient(), 2); + assertAllRoleMappings( + client(), + "operator_role_mapping_1" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "operator_role_mapping_2" + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX, + "no_name_conflict" + ); + // In the old cluster we might have created these (depending on the node features), so make sure they were removed + assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_1")); + assertFalse(roleMappingExistsInSecurityIndex("operator_role_mapping_2")); + assertTrue(roleMappingExistsInSecurityIndex("no_name_conflict")); + // Make sure we can create and delete a conflicting role mapping again + createNativeRoleMapping("operator_role_mapping_1", Map.of("meta", "test"), true); + deleteNativeRoleMapping("operator_role_mapping_1", true); + } + } + + @SuppressWarnings("unchecked") + private boolean roleMappingExistsInSecurityIndex(String mappingName) throws IOException { + final Request request = new Request("POST", "/.security/_search"); + request.setJsonEntity(String.format(Locale.ROOT, """ + {"query":{"bool":{"must":[{"term":{"_id":"%s_%s"}}]}}}""", "role-mapping", mappingName)); + + request.setOptions( + expectWarnings( + "this request accesses system indices: [.security-7]," + + " but in a future major version, direct access to system indices will be prevented by default" + ) + ); + + Response response = adminClient().performRequest(request);
+ assertOK(response); + final Map responseMap = responseAsMap(response); + + Map hits = ((Map) responseMap.get("hits")); + return ((List) hits.get("hits")).isEmpty() == false; + } + + private void createNativeRoleMapping(String roleMappingName, Map metadata) throws IOException { + createNativeRoleMapping(roleMappingName, metadata, false); + } + + private void createNativeRoleMapping(String roleMappingName, Map metadata, boolean expectWarning) throws IOException { + final Request request = new Request("POST", "/_security/role_mapping/" + roleMappingName); + if (expectWarning) { + request.setOptions( + expectWarnings( + "A read-only role mapping with the same name [" + + roleMappingName + + "] has been previously defined in a configuration file. " + + "Both role mappings will be used to determine role assignments." + ) + ); + } + + BytesReference source = BytesReference.bytes( + jsonBuilder().map( + Map.of( + ExpressionRoleMapping.Fields.ROLES.getPreferredName(), + List.of("superuser"), + ExpressionRoleMapping.Fields.ENABLED.getPreferredName(), + true, + ExpressionRoleMapping.Fields.RULES.getPreferredName(), + Map.of("field", Map.of("username", "role-mapping-test-user")), + RoleDescriptor.Fields.METADATA.getPreferredName(), + metadata + ) + ) + ); + request.setJsonEntity(source.utf8ToString()); + assertOK(client().performRequest(request)); + } + + private void deleteNativeRoleMapping(String roleMappingName, boolean expectWarning) throws IOException { + final Request request = new Request("DELETE", "/_security/role_mapping/" + roleMappingName); + if (expectWarning) { + request.setOptions( + expectWarnings( + "A read-only role mapping with the same name [" + + roleMappingName + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments." + ) + ); + } + assertOK(client().performRequest(request)); + } + + private void assertAllRoleMappings(RestClient client, String... 
roleNames) throws IOException { + Request request = new Request("GET", "/_security/role_mapping"); + Response response = client.performRequest(request); + assertOK(response); + Map responseMap = responseAsMap(response); + + assertThat(responseMap.keySet(), containsInAnyOrder(roleNames)); + assertThat(responseMap.size(), is(roleNames.length)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java index d31130e970f03..6c34e68297aa0 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SecurityIndexRolesMetadataMigrationIT.java @@ -58,7 +58,7 @@ public void testRoleMigration() throws Exception { } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { createRoleWithMetadata(upgradedTestRole, Map.of("meta", "test")); assertTrue(canRolesBeMigrated()); - waitForMigrationCompletion(adminClient()); + waitForSecurityMigrationCompletion(adminClient(), 1); assertMigratedDocInSecurityIndex(oldTestRole, "meta", "test"); assertMigratedDocInSecurityIndex(mixed1TestRole, "meta", "test"); assertMigratedDocInSecurityIndex(mixed2TestRole, "meta", "test"); @@ -136,23 +136,6 @@ private static void assertNoMigration(RestClient adminClient) throws Exception { ); } - @SuppressWarnings("unchecked") - private static void waitForMigrationCompletion(RestClient adminClient) throws Exception { - final Request request = new Request("GET", "_cluster/state/metadata/" + INTERNAL_SECURITY_MAIN_INDEX_7); - assertBusy(() -> { - Response response = adminClient.performRequest(request); - assertOK(response); - Map responseMap = responseAsMap(response); - Map indicesMetadataMap = (Map) ((Map) responseMap.get("metadata")).get( - "indices" - ); - assertTrue(indicesMetadataMap.containsKey(INTERNAL_SECURITY_MAIN_INDEX_7)); - assertTrue( - ((Map) indicesMetadataMap.get(INTERNAL_SECURITY_MAIN_INDEX_7)).containsKey(MIGRATION_VERSION_CUSTOM_KEY) - ); - }); - } - private void createRoleWithMetadata(String roleName, Map metadata) throws IOException { final Request request = new Request("POST", "/_security/role/" + roleName); BytesReference source = BytesReference.bytes( diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json b/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json new file mode 100644 index 0000000000000..d897cabb8ab01 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/resources/operator_defined_role_mappings.json @@ -0,0 +1,38 @@ +{ + "metadata": { + "version": "2", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "operator_role_mapping_1": { + "enabled": true, + "roles": [ + "kibana_user" + ], + "metadata": { + "from_file": true + }, + "rules": { + "field": { + "username": "role-mapping-test-user" + } + } + }, + "operator_role_mapping_2": { + "enabled": true, + "roles": [ + "fleet_user" + ], + "metadata": { + "from_file": true + }, + "rules": { + "field": { + "username": "role-mapping-test-user" + } + } + } + } + } +}
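For reference, the duplicate detection exercised by the cleanup migration and the rolling-upgrade test above can be sketched as a small standalone program. The RoleMapping record and READ_ONLY_SUFFIX constant are simplified stand-ins for ExpressionRoleMapping and its read-only suffix, and the suffix value is assumed here purely for illustration:

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

final class DuplicateRoleMappingSketch {
    // Simplified stand-in for ExpressionRoleMapping: just a name and a read-only flag.
    record RoleMapping(String name, boolean readOnly) {}

    // Assumed suffix value for illustration; the real constant lives on ExpressionRoleMapping.
    static final String READ_ONLY_SUFFIX = "-read-only-operator-mapping";

    // A native mapping counts as a duplicate when a cluster-state (read-only) mapping carries
    // the same name once the read-only suffix is stripped from the cluster-state mapping's name.
    static List<String> duplicateNames(List<RoleMapping> mappings) {
        Map<Boolean, List<RoleMapping>> byReadOnly = mappings.stream()
            .collect(Collectors.partitioningBy(RoleMapping::readOnly));
        Set<String> clusterStateNames = byReadOnly.get(true)
            .stream()
            .map(RoleMapping::name)
            .map(name -> name.endsWith(READ_ONLY_SUFFIX) ? name.substring(0, name.length() - READ_ONLY_SUFFIX.length()) : name)
            .collect(Collectors.toSet());
        return byReadOnly.get(false).stream().map(RoleMapping::name).filter(clusterStateNames::contains).toList();
    }

    public static void main(String[] args) {
        List<String> duplicates = duplicateNames(
            List.of(
                new RoleMapping("operator_role_mapping_1" + READ_ONLY_SUFFIX, true),
                new RoleMapping("operator_role_mapping_1", false),
                new RoleMapping("no_name_conflict", false)
            )
        );
        System.out.println(duplicates); // prints [operator_role_mapping_1]
    }
}

The migration then issues one delete-role-mapping request per duplicate name against the native store, which is why the upgraded cluster in the test above no longer finds operator_role_mapping_1 or operator_role_mapping_2 in the .security index while no_name_conflict remains.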