From 1a906897e6dbffb67226106cd1ecc4324626d242 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Tue, 19 Oct 2021 18:52:22 -0600 Subject: [PATCH] Implement framework for migrating system indices (#78951) This PR adds a framework for migrating system indices as necessary prior to Elasticsearch upgrades. This framework uses REST APIs added in another commit: - GET _migration/system_features This API gets the status of "features" (plugins which own system indices) with regard to whether they need to be upgraded or not. As of this PR, this API also reports errors encountered while migrating system indices alongside the index that was being processed when this occurred. As an example of this error reporting: ```json { "feature_name": "logstash_management", "minimum_index_version": "8.0.0", "upgrade_status": "ERROR", "indices": [ { "index": ".logstash", "version": "8.0.0", "failure_cause": { "error": { "root_cause": [ { "type": "runtime_exception", "reason": "whoopsie", "stack_trace": "" } ], "type": "runtime_exception", "reason": "whoopsie", "stack_trace": "" } } } ] } ``` - POST _migration/system_features This API starts the migration process. The API for this has no changes, but when called, any system indices which need to be migrated will be migrated, with status information stored in the cluster state for later use by the GET _migration/system_features API. 
--- .../org/elasticsearch/client/MigrationIT.java | 11 +- .../GetFeatureUpgradeStatusResponseTests.java | 16 +- .../migration/apis/feature_upgrade.asciidoc | 3 +- .../migration/FeatureMigrationIT.java | 388 +++++++++++ .../migration/MultiFeatureMigrationIT.java | 302 +++++++++ .../upgrades/FeatureUpgradeIT.java | 2 +- .../GetFeatureUpgradeStatusResponse.java | 82 ++- ...ransportGetFeatureUpgradeStatusAction.java | 115 +++- .../TransportPostFeatureUpgradeAction.java | 58 +- .../elasticsearch/cluster/ClusterModule.java | 13 +- .../MetadataUpdateSettingsService.java | 2 - .../elasticsearch/indices/SystemIndices.java | 261 +++++--- .../java/org/elasticsearch/node/Node.java | 44 +- .../upgrades/FeatureMigrationResults.java | 194 ++++++ .../upgrades/MigrationResultsUpdateTask.java | 100 +++ .../SingleFeatureMigrationResult.java | 166 +++++ .../SystemIndexMigrationExecutor.java | 122 ++++ .../upgrades/SystemIndexMigrationInfo.java | 284 ++++++++ .../SystemIndexMigrationTaskParams.java | 75 +++ .../SystemIndexMigrationTaskState.java | 133 ++++ .../upgrades/SystemIndexMigrator.java | 610 ++++++++++++++++++ .../GetFeatureUpgradeStatusResponseTests.java | 36 +- ...ortGetFeatureUpgradeStatusActionTests.java | 17 +- .../FeatureMigrationResultsTests.java | 74 +++ .../SystemIndexMigrationTaskParamsTests.java | 50 ++ .../SystemIndexMigrationTaskStateTests.java | 103 +++ .../authz/privilege/SystemPrivilege.java | 6 +- .../authz/privilege/PrivilegeTests.java | 4 +- 28 files changed, 3079 insertions(+), 192 deletions(-) create mode 100644 modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java create mode 100644 modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java create mode 100644 
server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationInfo.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParams.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java create mode 100644 server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java create mode 100644 server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java create mode 100644 server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParamsTests.java create mode 100644 server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskStateTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java index 7ae162c02f2a5..67d4818d26fc0 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MigrationIT.java @@ -23,8 +23,8 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.nullValue; public class MigrationIT extends ESRestHighLevelClientTestCase { @@ -60,11 +60,8 @@ public void testGetFeatureUpgradeStatus() throws IOException { public void testPostFeatureUpgradeStatus() throws IOException { PostFeatureUpgradeRequest request = new PostFeatureUpgradeRequest(); PostFeatureUpgradeResponse response = highLevelClient().migration().postFeatureUpgrade(request, RequestOptions.DEFAULT); - assertThat(response.isAccepted(), equalTo(true)); - 
assertThat(response.getFeatures().size(), equalTo(1)); - PostFeatureUpgradeResponse.Feature feature = response.getFeatures().get(0); - assertThat(feature.getFeatureName(), equalTo("security")); - assertThat(response.getReason(), nullValue()); - assertThat(response.getElasticsearchException(), nullValue()); + assertThat(response.isAccepted(), equalTo(false)); + assertThat(response.getFeatures(), hasSize(0)); + assertThat(response.getReason(), equalTo("No system indices require migration")); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java index f6483dd1651b5..7b3bb7cf6004b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/GetFeatureUpgradeStatusResponseTests.java @@ -40,10 +40,14 @@ protected org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStat randomAlphaOfLengthBetween(3, 20), randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()), - randomList(4, - () -> new org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexVersion( + randomList( + 4, + () -> new org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexInfo( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()))) + randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + null + ) + ) )), randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()) ); @@ -78,12 +82,12 @@ protected void assertInstances( 
assertThat(clientStatus.getIndexVersions(), hasSize(serverTestStatus.getIndexVersions().size())); - for (int j = 0; i < clientStatus.getIndexVersions().size(); i++) { + for (int j = 0; j < clientStatus.getIndexVersions().size(); j++) { - org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexVersion serverIndexVersion + org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.IndexInfo serverIndexInfo = serverTestStatus.getIndexVersions().get(j); GetFeatureUpgradeStatusResponse.IndexVersion clientIndexVersion = clientStatus.getIndexVersions().get(j); - assertThat(clientIndexVersion.getIndexName(), equalTo(serverIndexVersion.getIndexName())); - assertThat(clientIndexVersion.getVersion(), equalTo(serverIndexVersion.getVersion().toString())); + assertThat(clientIndexVersion.getIndexName(), equalTo(serverIndexInfo.getIndexName())); + assertThat(clientIndexVersion.getVersion(), equalTo(serverIndexInfo.getVersion().toString())); } } } diff --git a/docs/reference/migration/apis/feature_upgrade.asciidoc b/docs/reference/migration/apis/feature_upgrade.asciidoc index c6b226830ab22..738227c5eae25 100644 --- a/docs/reference/migration/apis/feature_upgrade.asciidoc +++ b/docs/reference/migration/apis/feature_upgrade.asciidoc @@ -24,7 +24,7 @@ and to trigger an automated system upgrade that might potentially involve downti ==== {api-prereq-title} * If the {es} {security-features} are enabled, you must have the `manage` -<> to use this API. (TODO: true?) +<> to use this API. [[feature-upgrade-api-example]] ==== {api-examples-title} @@ -144,6 +144,7 @@ Example response: ] } -------------------------------------------------- +// TESTRESPONSE[skip: can't actually upgrade system indices in these tests] This tells us that the security index is being upgraded. To check the overall status of the upgrade, call the endpoint with GET. 
diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java new file mode 100644 index 0000000000000..09765b472c770 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -0,0 +1,388 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.migration; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusAction; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeAction; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeRequest; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeResponse; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; 
+import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.upgrades.FeatureMigrationResults; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class FeatureMigrationIT extends ESIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build(); + } + + @Override + protected boolean forbidPrivateIndexSettings() { + // We need to be able to set the index creation version manually. 
+ return false; + } + + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + public void testMigrateInternalManagedSystemIndex() throws Exception { + createSystemIndexForDescriptor(INTERNAL_MANAGED); + createSystemIndexForDescriptor(INTERNAL_UNMANAGED); + createSystemIndexForDescriptor(EXTERNAL_MANAGED); + createSystemIndexForDescriptor(EXTERNAL_UNMANAGED); + + ensureGreen(); + + SetOnce preUpgradeHookCalled = new SetOnce<>(); + SetOnce postUpgradeHookCalled = new SetOnce<>(); + TestPlugin.preMigrationHook.set(clusterState -> { + // Check that the ordering of these calls is correct. + assertThat(postUpgradeHookCalled.get(), nullValue()); + Map metadata = new HashMap<>(); + metadata.put("stringKey", "stringValue"); + metadata.put("intKey", 42); + { + Map innerMetadata = new HashMap<>(); + innerMetadata.put("innerKey", "innerValue"); + + metadata.put("mapKey", innerMetadata); + } + metadata.put("listKey", Arrays.asList(1, 2, 3, 4)); + preUpgradeHookCalled.set(true); + return metadata; + }); + + TestPlugin.postMigrationHook.set((clusterState, metadata) -> { + assertThat(preUpgradeHookCalled.get(), is(true)); + + assertThat( + metadata, + hasEntry("stringKey", "stringValue") + ); + assertThat(metadata, hasEntry("intKey", 42)); + assertThat(metadata, hasEntry("listKey", Arrays.asList(1,2,3,4))); + assertThat(metadata, hasKey("mapKey")); + @SuppressWarnings("unchecked") + Map innerMap = (Map) metadata.get("mapKey"); + assertThat(innerMap, hasEntry("innerKey", "innerValue")); + + // We shouldn't have any results in the cluster state as no features have fully finished yet. 
+ FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, nullValue()); + postUpgradeHookCalled.set(true); + }); + + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); + assertThat(migrationResponse.getReason(), nullValue()); + assertThat(migrationResponse.getElasticsearchException(), nullValue()); + final Set migratingFeatures = migrationResponse.getFeatures() + .stream() + .map(PostFeatureUpgradeResponse.Feature::getFeatureName) + .collect(Collectors.toSet()); + assertThat(migratingFeatures, hasItem(FEATURE_NAME)); + + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + assertBusy(() -> { + GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest) + .get(); + logger.info(Strings.toString(statusResponse)); + assertThat(statusResponse.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); + }); + + assertTrue("the pre-migration hook wasn't actually called", preUpgradeHookCalled.get()); + assertTrue("the post-migration hook wasn't actually called", postUpgradeHookCalled.get()); + + Metadata finalMetadata = client().admin().cluster().prepareState().get().getState().metadata(); + // Check that the results metadata is what we expect. 
+ FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + + assertIndexHasCorrectProperties( + finalMetadata, + ".int-man-old-reindexed-for-8", + INTERNAL_MANAGED_FLAG_VALUE, + true, + true, + Arrays.asList(".int-man-old", ".internal-managed-alias") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".int-unman-old-reindexed-for-8", + INTERNAL_UNMANAGED_FLAG_VALUE, + false, + true, + Collections.singletonList(".int-unman-old") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".ext-man-old-reindexed-for-8", + EXTERNAL_MANAGED_FLAG_VALUE, + true, + false, + Arrays.asList(".ext-man-old", ".external-managed-alias") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".ext-unman-old-reindexed-for-8", + EXTERNAL_UNMANAGED_FLAG_VALUE, + false, + false, + Collections.singletonList(".ext-unman-old") + ); + } + + public void assertIndexHasCorrectProperties( + Metadata metadata, + String indexName, + int settingsFlagValue, + boolean isManaged, + boolean isInternal, + Collection aliasNames) { + IndexMetadata imd = metadata.index(indexName); + assertThat(imd.getSettings().get(FlAG_SETTING_KEY), equalTo(Integer.toString(settingsFlagValue))); + final Map mapping = imd.mapping().getSourceAsMap(); + @SuppressWarnings("unchecked") + final Map meta = (Map) mapping.get("_meta"); + assertThat(meta.get(DESCRIPTOR_MANAGED_META_KEY), is(isManaged)); + assertThat(meta.get(DESCRIPTOR_INTERNAL_META_KEY), is(isInternal)); + + assertThat(imd.isSystem(), is(true)); + + Set actualAliasNames = 
imd.getAliases().keySet(); + assertThat(actualAliasNames, containsInAnyOrder(aliasNames.toArray())); + + IndicesStatsResponse indexStats = client().admin().indices().prepareStats(imd.getIndex().getName()).setDocs(true).get(); + assertThat(indexStats.getIndex(imd.getIndex().getName()).getTotal().getDocs().getCount(), is((long) INDEX_DOC_COUNT)); + } + + public void createSystemIndexForDescriptor(SystemIndexDescriptor descriptor) throws InterruptedException { + assertTrue( + "the strategy used below to create index names for descriptors without a primary index name only works for simple patterns", + descriptor.getIndexPattern().endsWith("*") + ); + String indexName = Optional.ofNullable(descriptor.getPrimaryIndex()).orElse(descriptor.getIndexPattern().replace("*", "old")); + CreateIndexRequestBuilder createRequest = prepareCreate(indexName); + createRequest.setWaitForActiveShards(ActiveShardCount.ALL); + if (descriptor.getSettings() != null) { + createRequest.setSettings(Settings.builder().put("index.version.created", Version.CURRENT).build()); + } else { + createRequest.setSettings( + createSimpleSettings( + Version.V_7_0_0, + descriptor.isInternal() ? 
INTERNAL_UNMANAGED_FLAG_VALUE : EXTERNAL_UNMANAGED_FLAG_VALUE + ) + ); + } + if (descriptor.getMappings() == null) { + createRequest.setMapping(createSimpleMapping(false, descriptor.isInternal())); + } + CreateIndexResponse response = createRequest.get(); + assertTrue(response.isShardsAcknowledged()); + + List docs = new ArrayList<>(INDEX_DOC_COUNT); + for (int i = 0; i < INDEX_DOC_COUNT; i++) { + docs.add(client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("some_field", "words words")); + } + indexRandom(true, docs); + IndicesStatsResponse indexStats = client().admin().indices().prepareStats(indexName).setDocs(true).get(); + assertThat(indexStats.getIndex(indexName).getTotal().getDocs().getCount(), is((long) INDEX_DOC_COUNT)); + } + + static final String VERSION_META_KEY = "version"; + static final Version META_VERSION = Version.CURRENT; + static final String DESCRIPTOR_MANAGED_META_KEY = "desciptor_managed"; + static final String DESCRIPTOR_INTERNAL_META_KEY = "descriptor_internal"; + static final String FEATURE_NAME = "A-test-feature"; // Sorts alphabetically before the feature from MultiFeatureMigrationIT + static final String ORIGIN = FeatureMigrationIT.class.getSimpleName(); + static final String FlAG_SETTING_KEY = IndexMetadata.INDEX_PRIORITY_SETTING.getKey(); + static final int INDEX_DOC_COUNT = 100; // arbitrarily chosen + + static final int INTERNAL_MANAGED_FLAG_VALUE = 1; + static final int INTERNAL_UNMANAGED_FLAG_VALUE = 2; + static final int EXTERNAL_MANAGED_FLAG_VALUE = 3; + static final int EXTERNAL_UNMANAGED_FLAG_VALUE = 4; + static final SystemIndexDescriptor INTERNAL_MANAGED = SystemIndexDescriptor.builder() + .setIndexPattern(".int-man-*") + .setAliasName(".internal-managed-alias") + .setPrimaryIndex(".int-man-old") + .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) + .setSettings(createSimpleSettings(Version.V_7_0_0, INTERNAL_MANAGED_FLAG_VALUE)) + .setMappings(createSimpleMapping(true, true)) + .setOrigin(ORIGIN) + 
.setVersionMetaKey(VERSION_META_KEY) + .setAllowedElasticProductOrigins(Collections.emptyList()) + .setMinimumNodeVersion(Version.V_7_0_0) + .setPriorSystemIndexDescriptors(Collections.emptyList()) + .build(); + static final SystemIndexDescriptor INTERNAL_UNMANAGED = SystemIndexDescriptor.builder() + .setIndexPattern(".int-unman-*") + .setType(SystemIndexDescriptor.Type.INTERNAL_UNMANAGED) + .setOrigin(ORIGIN) + .setVersionMetaKey(VERSION_META_KEY) + .setAllowedElasticProductOrigins(Collections.emptyList()) + .setMinimumNodeVersion(Version.V_7_0_0) + .setPriorSystemIndexDescriptors(Collections.emptyList()) + .build(); + static final SystemIndexDescriptor EXTERNAL_MANAGED = SystemIndexDescriptor.builder() + .setIndexPattern(".ext-man-*") + .setAliasName(".external-managed-alias") + .setPrimaryIndex(".ext-man-old") + .setType(SystemIndexDescriptor.Type.EXTERNAL_MANAGED) + .setSettings(createSimpleSettings(Version.V_7_0_0, EXTERNAL_MANAGED_FLAG_VALUE)) + .setMappings(createSimpleMapping(true, false)) + .setOrigin(ORIGIN) + .setVersionMetaKey(VERSION_META_KEY) + .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN)) + .setMinimumNodeVersion(Version.V_7_0_0) + .setPriorSystemIndexDescriptors(Collections.emptyList()) + .build(); + static final SystemIndexDescriptor EXTERNAL_UNMANAGED = SystemIndexDescriptor.builder() + .setIndexPattern(".ext-unman-*") + .setType(SystemIndexDescriptor.Type.EXTERNAL_UNMANAGED) + .setOrigin(ORIGIN) + .setVersionMetaKey(VERSION_META_KEY) + .setAllowedElasticProductOrigins(Collections.singletonList(ORIGIN)) + .setMinimumNodeVersion(Version.V_7_0_0) + .setPriorSystemIndexDescriptors(Collections.emptyList()) + .build(); + + static Settings createSimpleSettings(Version creationVersion, int flagSettingValue) { + return Settings.builder() + .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(FlAG_SETTING_KEY, flagSettingValue) + 
.put("index.version.created", creationVersion) + .build(); + } + + static String createSimpleMapping(boolean descriptorManaged, boolean descriptorInternal) { + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + builder.startObject(); + { + builder.startObject("_meta"); + builder.field(VERSION_META_KEY, META_VERSION); + builder.field(DESCRIPTOR_MANAGED_META_KEY, descriptorManaged); + builder.field(DESCRIPTOR_INTERNAL_META_KEY, descriptorInternal); + builder.endObject(); + + builder.field("dynamic", "strict"); + builder.startObject("properties"); + { + builder.startObject("some_field"); + builder.field("type", "keyword"); + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + return Strings.toString(builder); + } catch (IOException e) { + // Just rethrow, it should be impossible for this to throw here + throw new AssertionError(e); + } + } + + public static class TestPlugin extends Plugin implements SystemIndexPlugin { + public static final AtomicReference>> preMigrationHook = new AtomicReference<>(); + public static final AtomicReference>> postMigrationHook = new AtomicReference<>(); + + public TestPlugin() { + + } + + @Override + public String getFeatureName() { + return FEATURE_NAME; + } + + @Override + public String getFeatureDescription() { + return "a plugin for testing system index migration"; + } + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + return Arrays.asList(INTERNAL_MANAGED, INTERNAL_UNMANAGED, EXTERNAL_MANAGED, EXTERNAL_UNMANAGED); + } + + @Override + public void prepareForIndicesMigration(ClusterService clusterService, Client client, ActionListener> listener) { + listener.onResponse(preMigrationHook.get().apply(clusterService.state())); + } + + @Override + public void indicesMigrationComplete( + Map preUpgradeMetadata, + ClusterService clusterService, + Client client, + ActionListener listener + ) { + postMigrationHook.get().accept(clusterService.state(), preUpgradeMetadata); + 
listener.onResponse(true); + } + } +} diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java new file mode 100644 index 0000000000000..832990a973e53 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -0,0 +1,302 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.migration; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusAction; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusRequest; +import org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeAction; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeRequest; +import org.elasticsearch.action.admin.cluster.migration.PostFeatureUpgradeResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.reindex.ReindexPlugin; 
+import org.elasticsearch.upgrades.FeatureMigrationResults; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.aMapWithSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class MultiFeatureMigrationIT extends FeatureMigrationIT { + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)).build(); + } + + @Override + protected boolean forbidPrivateIndexSettings() { + // We need to be able to set the index creation version manually. 
+ return false; + } + + @Override + protected Collection> nodePlugins() { + List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(FeatureMigrationIT.TestPlugin.class); + plugins.add(SecondPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + // Sorts alphabetically after the feature from FeatureMigrationIT + private static final String SECOND_FEATURE_NAME = "B-test-feature"; + private static final String ORIGIN = MultiFeatureMigrationIT.class.getSimpleName(); + private static final String VERSION_META_KEY = "version"; + static final int SECOND_FEATURE_IDX_FLAG_VALUE = 0; + + public void testMultipleFeatureMigration() throws Exception { + // All the indices from FeatureMigrationIT + createSystemIndexForDescriptor(INTERNAL_MANAGED); + createSystemIndexForDescriptor(INTERNAL_UNMANAGED); + createSystemIndexForDescriptor(EXTERNAL_MANAGED); + createSystemIndexForDescriptor(EXTERNAL_UNMANAGED); + // And our new one + createSystemIndexForDescriptor(SECOND_FEATURE_IDX_DESCIPTOR); + + ensureGreen(); + + SetOnce preMigrationHookCalled = new SetOnce<>(); + SetOnce postMigrationHookCalled = new SetOnce<>(); + SetOnce secondPluginPreMigrationHookCalled = new SetOnce<>(); + SetOnce secondPluginPostMigrationHookCalled = new SetOnce<>(); + + TestPlugin.preMigrationHook.set(clusterState -> { + // None of the other hooks should have been called yet. + assertThat(postMigrationHookCalled.get(), nullValue()); + assertThat(secondPluginPreMigrationHookCalled.get(), nullValue()); + assertThat(secondPluginPostMigrationHookCalled.get(), nullValue()); + Map metadata = new HashMap<>(); + metadata.put("stringKey", "first plugin value"); + + // We shouldn't have any results in the cluster state given no features have finished yet. 
+ FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, nullValue()); + + preMigrationHookCalled.set(true); + return metadata; + }); + + TestPlugin.postMigrationHook.set((clusterState, metadata) -> { + // Check that the hooks have been called or not as expected. + assertThat(preMigrationHookCalled.get(), is(true)); + assertThat(secondPluginPreMigrationHookCalled.get(), nullValue()); + assertThat(secondPluginPostMigrationHookCalled.get(), nullValue()); + + assertThat( + metadata, + hasEntry("stringKey", "first plugin value") + ); + + // We shouldn't have any results in the cluster state given no features have finished yet. + FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, nullValue()); + + postMigrationHookCalled.set(true); + }); + + SecondPlugin.preMigrationHook.set(clusterState -> { + // Check that the hooks have been called or not as expected. + assertThat(preMigrationHookCalled.get(), is(true)); + assertThat(postMigrationHookCalled.get(), is(true)); + assertThat(secondPluginPostMigrationHookCalled.get(), nullValue()); + + Map metadata = new HashMap<>(); + metadata.put("stringKey", "second plugin value"); + + // But now, we should have results, as we're in a new feature! 
+ FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + + secondPluginPreMigrationHookCalled.set(true); + return metadata; + }); + + SecondPlugin.postMigrationHook.set((clusterState, metadata) -> { + // Check that the hooks have been called or not as expected. + assertThat(preMigrationHookCalled.get(), is(true)); + assertThat(postMigrationHookCalled.get(), is(true)); + assertThat(secondPluginPreMigrationHookCalled.get(), is(true)); + + assertThat( + metadata, + hasEntry("stringKey", "second plugin value") + ); + + // And here, the results should be the same, as we haven't updated the state with this feature's status yet. 
+ FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + + secondPluginPostMigrationHookCalled.set(true); + }); + + PostFeatureUpgradeRequest migrationRequest = new PostFeatureUpgradeRequest(); + PostFeatureUpgradeResponse migrationResponse = client().execute(PostFeatureUpgradeAction.INSTANCE, migrationRequest).get(); + assertThat(migrationResponse.getReason(), nullValue()); + assertThat(migrationResponse.getElasticsearchException(), nullValue()); + final Set migratingFeatures = migrationResponse.getFeatures() + .stream() + .map(PostFeatureUpgradeResponse.Feature::getFeatureName) + .collect(Collectors.toSet()); + assertThat(migratingFeatures, hasItems(FEATURE_NAME, SECOND_FEATURE_NAME)); + + GetFeatureUpgradeStatusRequest getStatusRequest = new GetFeatureUpgradeStatusRequest(); + assertBusy(() -> { + GetFeatureUpgradeStatusResponse statusResponse = client().execute(GetFeatureUpgradeStatusAction.INSTANCE, getStatusRequest) + .get(); + logger.info(Strings.toString(statusResponse)); + assertThat(statusResponse.getUpgradeStatus(), equalTo(GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED)); + }); + + assertTrue("the first plugin's pre-migration hook wasn't actually called", preMigrationHookCalled.get()); + assertTrue("the first plugin's post-migration hook wasn't actually called", postMigrationHookCalled.get()); + + assertTrue("the second plugin's pre-migration hook wasn't actually called", secondPluginPreMigrationHookCalled.get()); + assertTrue("the second plugin's 
post-migration hook wasn't actually called", secondPluginPostMigrationHookCalled.get()); + + Metadata finalMetadata = client().admin().cluster().prepareState().get().getState().metadata(); + // Check that the results metadata is what we expect + FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); + assertThat(currentResults, notNullValue()); + assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(2), hasKey(FEATURE_NAME), hasKey(SECOND_FEATURE_NAME))); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).getException(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).succeeded(), is(true)); + assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).getFailedIndexName(), nullValue()); + assertThat(currentResults.getFeatureStatuses().get(SECOND_FEATURE_NAME).getException(), nullValue()); + + // Finally, verify that all the indices exist and have the properties we expect. 
+ assertIndexHasCorrectProperties( + finalMetadata, + ".int-man-old-reindexed-for-8", + INTERNAL_MANAGED_FLAG_VALUE, + true, + true, + Arrays.asList(".int-man-old", ".internal-managed-alias") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".int-unman-old-reindexed-for-8", + INTERNAL_UNMANAGED_FLAG_VALUE, + false, + true, + Collections.singletonList(".int-unman-old") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".ext-man-old-reindexed-for-8", + EXTERNAL_MANAGED_FLAG_VALUE, + true, + false, + Arrays.asList(".ext-man-old", ".external-managed-alias") + ); + assertIndexHasCorrectProperties( + finalMetadata, + ".ext-unman-old-reindexed-for-8", + EXTERNAL_UNMANAGED_FLAG_VALUE, + false, + false, + Collections.singletonList(".ext-unman-old") + ); + + assertIndexHasCorrectProperties( + finalMetadata, + ".second-int-man-old-reindexed-for-8", + SECOND_FEATURE_IDX_FLAG_VALUE, + true, + true, + Arrays.asList(".second-int-man-old", ".second-internal-managed-alias") + ); + } + + private static final SystemIndexDescriptor SECOND_FEATURE_IDX_DESCIPTOR = SystemIndexDescriptor.builder() + .setIndexPattern(".second-int-man-*") + .setAliasName(".second-internal-managed-alias") + .setPrimaryIndex(".second-int-man-old") + .setType(SystemIndexDescriptor.Type.INTERNAL_MANAGED) + .setSettings(createSimpleSettings(Version.V_7_0_0, 0)) + .setMappings(createSimpleMapping(true, true)) + .setOrigin(ORIGIN) + .setVersionMetaKey(VERSION_META_KEY) + .setAllowedElasticProductOrigins(Collections.emptyList()) + .setMinimumNodeVersion(Version.V_7_0_0) + .setPriorSystemIndexDescriptors(Collections.emptyList()) + .build(); + + public static class SecondPlugin extends Plugin implements SystemIndexPlugin { + + private static final AtomicReference>> preMigrationHook = new AtomicReference<>(); + private static final AtomicReference>> postMigrationHook = new AtomicReference<>(); + + public SecondPlugin() { + + } + + @Override public String getFeatureName() { + return 
SECOND_FEATURE_NAME; + } + + @Override public String getFeatureDescription() { + return "a plugin for test system index migration with multiple features"; + } + + @Override public Collection getSystemIndexDescriptors(Settings settings) { + return Collections.singletonList(SECOND_FEATURE_IDX_DESCIPTOR); + } + + @Override public void prepareForIndicesMigration( + ClusterService clusterService, Client client, ActionListener> listener) { + listener.onResponse(preMigrationHook.get().apply(clusterService.state())); + } + + @Override public void indicesMigrationComplete( + Map preUpgradeMetadata, ClusterService clusterService, Client client, ActionListener listener) { + postMigrationHook.get().accept(clusterService.state(), preUpgradeMetadata); + listener.onResponse(true); + } + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index d8502eadd5915..0591e0521ff78 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -91,7 +91,7 @@ public void testGetFeatureUpgradeStatus() throws Exception { assertThat(feature.size(), equalTo(4)); assertThat(feature.get("minimum_index_version"), equalTo(UPGRADE_FROM_VERSION.toString())); - if (UPGRADE_FROM_VERSION.before(Version.CURRENT.minimumIndexCompatibilityVersion())) { + if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { assertThat(feature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 68b0771595593..56b068fa2101d 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -8,17 +8,20 @@ package org.elasticsearch.action.admin.cluster.migration; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -98,9 +101,18 @@ public String toString() { } public enum UpgradeStatus { - MIGRATION_NEEDED, NO_MIGRATION_NEEDED, - IN_PROGRESS + MIGRATION_NEEDED, + IN_PROGRESS, + ERROR; + + public static UpgradeStatus combine(UpgradeStatus... 
statuses) { + int statusOrd = 0; + for (UpgradeStatus status : statuses) { + statusOrd = Math.max(status.ordinal(), statusOrd); + } + return UpgradeStatus.values()[statusOrd]; + } } /** @@ -111,20 +123,20 @@ public static class FeatureUpgradeStatus implements Writeable, ToXContentObject private final String featureName; private final Version minimumIndexVersion; private final UpgradeStatus upgradeStatus; - private final List indexVersions; + private final List indexInfos; /** * @param featureName Name of the feature * @param minimumIndexVersion Earliest Elasticsearch version used to create a system index for this feature * @param upgradeStatus Whether the feature needs to be upgraded - * @param indexVersions A list of this feature's concrete indices and the Elasticsearch version that created them + * @param indexInfos A list of this feature's concrete indices and the Elasticsearch version that created them */ public FeatureUpgradeStatus(String featureName, Version minimumIndexVersion, - UpgradeStatus upgradeStatus, List indexVersions) { + UpgradeStatus upgradeStatus, List indexInfos) { this.featureName = featureName; this.minimumIndexVersion = minimumIndexVersion; this.upgradeStatus = upgradeStatus; - this.indexVersions = indexVersions; + this.indexInfos = indexInfos; } /** @@ -135,7 +147,7 @@ public FeatureUpgradeStatus(StreamInput in) throws IOException { this.featureName = in.readString(); this.minimumIndexVersion = Version.readVersion(in); this.upgradeStatus = in.readEnum(UpgradeStatus.class); - this.indexVersions = in.readList(IndexVersion::new); + this.indexInfos = in.readList(IndexInfo::new); } public String getFeatureName() { @@ -150,8 +162,8 @@ public UpgradeStatus getUpgradeStatus() { return this.upgradeStatus; } - public List getIndexVersions() { - return this.indexVersions; + public List getIndexVersions() { + return this.indexInfos; } @Override @@ -159,7 +171,7 @@ public void writeTo(StreamOutput out) throws IOException { 
out.writeString(this.featureName); Version.writeVersion(this.minimumIndexVersion, out); out.writeEnum(this.upgradeStatus); - out.writeList(this.indexVersions); + out.writeList(this.indexInfos); } @Override @@ -169,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("minimum_index_version", this.minimumIndexVersion.toString()); builder.field("migration_status", this.upgradeStatus); builder.startArray("indices"); - for (IndexVersion version : this.indexVersions) { + for (IndexInfo version : this.indexInfos) { builder.value(version); } builder.endArray(); @@ -185,12 +197,12 @@ public boolean equals(Object o) { return Objects.equals(featureName, that.featureName) && Objects.equals(minimumIndexVersion, that.minimumIndexVersion) && Objects.equals(upgradeStatus, that.upgradeStatus) - && Objects.equals(indexVersions, that.indexVersions); + && Objects.equals(indexInfos, that.indexInfos); } @Override public int hashCode() { - return Objects.hash(featureName, minimumIndexVersion, upgradeStatus, indexVersions); + return Objects.hash(featureName, minimumIndexVersion, upgradeStatus, indexInfos); } @Override @@ -199,7 +211,7 @@ public String toString() { "featureName='" + featureName + '\'' + ", minimumIndexVersion='" + minimumIndexVersion + '\'' + ", upgradeStatus='" + upgradeStatus + '\'' + - ", indexVersions=" + indexVersions + + ", indexInfos=" + indexInfos + '}'; } } @@ -207,26 +219,38 @@ public String toString() { /** * A data class that holds an index name and the version of Elasticsearch with which that index was created */ - public static class IndexVersion implements Writeable, ToXContentObject { + public static class IndexInfo implements Writeable, ToXContentObject { + private static final Map STACK_TRACE_ENABLED_PARAMS = + Map.of(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"); + private final String indexName; private final Version version; + @Nullable private final Exception exception; // 
Present if this index failed /** * @param indexName Name of the index * @param version Version of Elasticsearch that created the index + * @param exception The exception that this index's migration failed with, if applicable */ - public IndexVersion(String indexName, Version version) { + public IndexInfo(String indexName, Version version, Exception exception) { this.indexName = indexName; this.version = version; + this.exception = exception; } /** * @param in A stream input for a serialized index version object * @throws IOException if we can't deserialize the object */ - public IndexVersion(StreamInput in) throws IOException { + public IndexInfo(StreamInput in) throws IOException { this.indexName = in.readString(); this.version = Version.readVersion(in); + boolean hasException = in.readBoolean(); + if (hasException) { + this.exception = in.readException(); + } else { + this.exception = null; + } } public String getIndexName() { @@ -237,17 +261,36 @@ public Version getVersion() { return this.version; } + public Exception getException() { + return this.exception; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.indexName); Version.writeVersion(this.version, out); + if (exception != null) { + out.writeBoolean(true); + out.writeException(this.exception); + } else { + out.writeBoolean(false); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + Params exceptionParams = new DelegatingMapParams(STACK_TRACE_ENABLED_PARAMS, params); + builder.startObject(); builder.field("index", this.indexName); builder.field("version", this.version.toString()); + if (exception != null) { + builder.startObject("failure_cause"); + { + ElasticsearchException.generateFailureXContent(builder, exceptionParams, exception, true); + } + builder.endObject(); + } builder.endObject(); return builder; } @@ -256,7 +299,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) 
throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - IndexVersion that = (IndexVersion) o; + IndexInfo that = (IndexInfo) o; return indexName.equals(that.indexName) && version.equals(that.version); } @@ -267,9 +310,10 @@ public int hashCode() { @Override public String toString() { - return "IndexVersion{" + + return "IndexInfo{" + "indexName='" + indexName + '\'' + ", version='" + version + '\'' + + ", exception='" + exception.getMessage() + "'" + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index 8670623ff5c23..04b1031cd6dc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -19,25 +19,38 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.upgrades.FeatureMigrationResults; +import org.elasticsearch.upgrades.SingleFeatureMigrationResult; +import org.elasticsearch.upgrades.SystemIndexMigrationTaskState; import java.util.Collection; +import java.util.Comparator; import java.util.List; -import java.util.Map; +import java.util.Optional; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR; +import static 
org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.IN_PROGRESS; import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED; import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; /** * Transport class for the get feature upgrade status action */ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeAction< - GetFeatureUpgradeStatusRequest, - GetFeatureUpgradeStatusResponse> { + GetFeatureUpgradeStatusRequest, + GetFeatureUpgradeStatusResponse> { + + /** + * This version is only valid for >=8.0.0 and should be changed on backport. + */ + public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0; private final SystemIndices systemIndices; @@ -65,55 +78,91 @@ public TransportGetFeatureUpgradeStatusAction( } @Override - protected void masterOperation(Task task, GetFeatureUpgradeStatusRequest request, ClusterState state, - ActionListener listener) throws Exception { - - List features = systemIndices.getFeatures().entrySet().stream() - .sorted(Map.Entry.comparingByKey()) - .map(entry -> getFeatureUpgradeStatus(state, entry)) + protected void masterOperation( + Task task, + GetFeatureUpgradeStatusRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + + List features = systemIndices.getFeatures() + .values() + .stream() + .sorted(Comparator.comparing(SystemIndices.Feature::getName)) + .map(feature -> getFeatureUpgradeStatus(state, feature)) .collect(Collectors.toList()); - boolean isUpgradeNeeded = features.stream() - .map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getMinimumIndexVersion) - .min(Version::compareTo) - .orElse(Version.CURRENT) - .before(Version.V_7_0_0); + 
GetFeatureUpgradeStatusResponse.UpgradeStatus status = features.stream() + .map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getUpgradeStatus) + .reduce(GetFeatureUpgradeStatusResponse.UpgradeStatus::combine) + .orElseGet(() -> { + assert false : "get feature statuses API doesn't have any features"; + return NO_MIGRATION_NEEDED; + }); - listener.onResponse(new GetFeatureUpgradeStatusResponse(features, isUpgradeNeeded ? MIGRATION_NEEDED : NO_MIGRATION_NEEDED)); + listener.onResponse(new GetFeatureUpgradeStatusResponse(features, status)); } - // visible for testing static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeStatus( - ClusterState state, Map.Entry entry) { + ClusterState state, + SystemIndices.Feature feature + ) { + String featureName = feature.getName(); - String featureName = entry.getKey(); - SystemIndices.Feature feature = entry.getValue(); + final String currentFeature = Optional.ofNullable( + state.metadata().custom(PersistentTasksCustomMetadata.TYPE) + ) + .map(tasksMetdata -> tasksMetdata.getTask(SYSTEM_INDEX_UPGRADE_TASK_NAME)) + .map(task -> task.getState()) + .map(taskState -> ((SystemIndexMigrationTaskState) taskState).getCurrentFeature()) + .orElse(null); - List indexVersions = getIndexVersions(state, feature); + List indexInfos = getIndexInfos(state, feature); - Version minimumVersion = indexVersions.stream() - .map(GetFeatureUpgradeStatusResponse.IndexVersion::getVersion) + Version minimumVersion = indexInfos.stream() + .map(GetFeatureUpgradeStatusResponse.IndexInfo::getVersion) .min(Version::compareTo) .orElse(Version.CURRENT); - - return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus( - featureName, - minimumVersion, - minimumVersion.before(Version.V_7_0_0) ? 
MIGRATION_NEEDED : NO_MIGRATION_NEEDED, - indexVersions - ); + GetFeatureUpgradeStatusResponse.UpgradeStatus initialStatus; + if (featureName.equals(currentFeature)) { + initialStatus = IN_PROGRESS; + } else if (minimumVersion.before(NO_UPGRADE_REQUIRED_VERSION)) { + initialStatus = MIGRATION_NEEDED; + } else { + initialStatus = NO_MIGRATION_NEEDED; + } + + GetFeatureUpgradeStatusResponse.UpgradeStatus status = indexInfos.stream() + .filter(idxInfo -> idxInfo.getException() != null) + .findFirst() + .map(idxInfo -> ERROR) + .map(idxStatus -> GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(idxStatus, initialStatus)) + .orElse(initialStatus); + + return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus(featureName, minimumVersion, status, indexInfos); } // visible for testing - static List getIndexVersions(ClusterState state, SystemIndices.Feature feature) { + static List getIndexInfos(ClusterState state, SystemIndices.Feature feature) { + final SingleFeatureMigrationResult featureStatus = Optional.ofNullable( + (FeatureMigrationResults) state.metadata().custom(FeatureMigrationResults.TYPE) + ).map(FeatureMigrationResults::getFeatureStatuses).map(results -> results.get(feature.getName())).orElse(null); + + final String failedFeatureName = featureStatus == null ? null : featureStatus.getFailedIndexName(); + final Exception exception = featureStatus == null ? 
null : featureStatus.getException(); + return Stream.of(feature.getIndexDescriptors(), feature.getAssociatedIndexDescriptors()) .flatMap(Collection::stream) .flatMap(descriptor -> descriptor.getMatchingIndices(state.metadata()).stream()) .sorted(String::compareTo) .map(index -> state.metadata().index(index)) - .map(indexMetadata -> new GetFeatureUpgradeStatusResponse.IndexVersion( - indexMetadata.getIndex().getName(), - indexMetadata.getCreationVersion())) + .map( + indexMetadata -> new GetFeatureUpgradeStatusResponse.IndexInfo( + indexMetadata.getIndex().getName(), + indexMetadata.getCreationVersion(), + indexMetadata.getIndex().getName().equals(failedFeatureName) ? exception : null + ) + ) .collect(Collectors.toList()); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java index 3ede3d4c4b463..b7ea3d4c7f817 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportPostFeatureUpgradeAction.java @@ -8,6 +8,9 @@ package org.elasticsearch.action.admin.cluster.migration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -18,12 +21,18 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; 
+import org.elasticsearch.upgrades.SystemIndexMigrationTaskParams; -import java.util.ArrayList; +import java.util.Comparator; import java.util.List; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.getFeatureUpgradeStatus; +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; /** * Transport action for post feature upgrade action @@ -31,8 +40,10 @@ public class TransportPostFeatureUpgradeAction extends TransportMasterNodeAction< PostFeatureUpgradeRequest, PostFeatureUpgradeResponse> { + private static final Logger logger = LogManager.getLogger(TransportPostFeatureUpgradeAction.class); private final SystemIndices systemIndices; + private final PersistentTasksService persistentTasksService; @Inject public TransportPostFeatureUpgradeAction( @@ -41,7 +52,8 @@ public TransportPostFeatureUpgradeAction( ActionFilters actionFilters, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, - SystemIndices systemIndices + SystemIndices systemIndices, + PersistentTasksService persistentTasksService ) { super( PostFeatureUpgradeAction.NAME, @@ -55,20 +67,46 @@ public TransportPostFeatureUpgradeAction( ThreadPool.Names.SAME ); this.systemIndices = systemIndices; + this.persistentTasksService = persistentTasksService; } @Override - protected void masterOperation(Task task, PostFeatureUpgradeRequest request, ClusterState state, - ActionListener listener) throws Exception { - List features = new ArrayList<>(); - features.add(new PostFeatureUpgradeResponse.Feature("security")); - listener.onResponse(new PostFeatureUpgradeResponse( - // TODO: implement operation for this action - true, features, null, null)); + protected void masterOperation( + Task task, + PostFeatureUpgradeRequest request, + ClusterState state, + ActionListener listener + ) throws Exception { + List featuresToMigrate = 
systemIndices.getFeatures() + .values() + .stream() + .map(feature -> getFeatureUpgradeStatus(state, feature)) + .filter(status -> status.getUpgradeStatus().equals(GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED)) + .map(GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus::getFeatureName) + .map(PostFeatureUpgradeResponse.Feature::new) + .sorted(Comparator.comparing(PostFeatureUpgradeResponse.Feature::getFeatureName)) // consistent ordering to simplify testing + .collect(Collectors.toList()); + + if (featuresToMigrate.isEmpty() == false) { + persistentTasksService.sendStartRequest( + SYSTEM_INDEX_UPGRADE_TASK_NAME, + SYSTEM_INDEX_UPGRADE_TASK_NAME, + new SystemIndexMigrationTaskParams(), + ActionListener.wrap(startedTask -> { + listener.onResponse(new PostFeatureUpgradeResponse(true, featuresToMigrate, null, null)); + }, ex -> { + logger.error("failed to start system index upgrade task", ex); + + listener.onResponse(new PostFeatureUpgradeResponse(false, null, null, new ElasticsearchException(ex))); + }) + ); + } else { + listener.onResponse(new PostFeatureUpgradeResponse(false, null, "No system indices require migration", null)); + } } @Override protected ClusterBlockException checkBlock(PostFeatureUpgradeRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index e7bc6e4f7ce42..302a0e02bcd71 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import 
org.elasticsearch.cluster.metadata.MetadataMappingService; -import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.routing.DelayedAllocationService; @@ -50,7 +49,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; - import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; @@ -60,7 +58,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; - import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.ingest.IngestMetadata; @@ -71,8 +68,9 @@ import org.elasticsearch.snapshots.SnapshotsInfoService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskResultsService; -import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.upgrades.FeatureMigrationResults; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.elasticsearch.xcontent.ParseField; import java.util.ArrayList; import java.util.Collection; @@ -138,6 +136,12 @@ public static List getNamedWriteables() { ComposableIndexTemplateMetadata::readDiffFrom); registerMetadataCustom(entries, DataStreamMetadata.TYPE, DataStreamMetadata::new, DataStreamMetadata::readDiffFrom); registerMetadataCustom(entries, NodesShutdownMetadata.TYPE, NodesShutdownMetadata::new, NodesShutdownMetadata::readDiffFrom); + registerMetadataCustom( + entries, + FeatureMigrationResults.TYPE, + 
FeatureMigrationResults::new, + FeatureMigrationResults::readDiffFrom + ); // Task Status (not Diffable) entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new)); @@ -266,7 +270,6 @@ protected void configure() { bind(MetadataIndexStateService.class).asEagerSingleton(); bind(MetadataMappingService.class).asEagerSingleton(); bind(MetadataIndexAliasesService.class).asEagerSingleton(); - bind(MetadataUpdateSettingsService.class).asEagerSingleton(); bind(MetadataIndexTemplateService.class).asEagerSingleton(); bind(IndexNameExpressionResolver.class).toInstance(indexNameExpressionResolver); bind(DelayedAllocationService.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java index e25808bc1d24f..9800f732e6d3a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; @@ -57,7 +56,6 @@ public class MetadataUpdateSettingsService { private final ShardLimitValidator shardLimitValidator; private final ThreadPool threadPool; - @Inject public MetadataUpdateSettingsService(ClusterService clusterService, AllocationService allocationService, IndexScopedSettings indexScopedSettings, IndicesService indicesService, ShardLimitValidator shardLimitValidator, ThreadPool threadPool) { diff --git 
a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 0be7dca237845..eafe27ad446e6 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -23,13 +23,13 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.core.Booleans; -import org.elasticsearch.core.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.TriConsumer; -import org.elasticsearch.core.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.snapshots.SnapshotsService; @@ -64,7 +64,8 @@ public class SystemIndices { private static final Automaton EMPTY = Automata.makeEmpty(); private static final Map SERVER_SYSTEM_INDEX_DESCRIPTORS = Map.of( - TASKS_FEATURE_NAME, new Feature(TASKS_FEATURE_NAME, "Manages task results", List.of(TASKS_DESCRIPTOR)) + TASKS_FEATURE_NAME, + new Feature(TASKS_FEATURE_NAME, "Manages task results", List.of(TASKS_DESCRIPTOR)) ); private final Automaton systemNameAutomaton; @@ -107,7 +108,8 @@ static void ensurePatternsAllowSuffix(Map features) { final List descriptorsWithNoRoomForSuffix = features.entrySet() .stream() .flatMap( - feature -> feature.getValue().getIndexDescriptors() + feature -> feature.getValue() + .getIndexDescriptors() .stream() // The below filter & map are inside the enclosing flapMap so we have access to both the feature and the descriptor .filter(descriptor -> overlaps(descriptor.getIndexPattern(), suffixPattern) == 
false) @@ -158,31 +160,38 @@ private static Map getProductToSystemIndicesMap(M for (Feature feature : descriptors.values()) { feature.getIndexDescriptors().forEach(systemIndexDescriptor -> { if (systemIndexDescriptor.isExternal()) { - systemIndexDescriptor.getAllowedElasticProductOrigins().forEach(origin -> - productToSystemIndicesMap.compute(origin, (key, value) -> { + systemIndexDescriptor.getAllowedElasticProductOrigins() + .forEach(origin -> productToSystemIndicesMap.compute(origin, (key, value) -> { Automaton automaton = SystemIndexDescriptor.buildAutomaton( - systemIndexDescriptor.getIndexPattern(), systemIndexDescriptor.getAliasName()); + systemIndexDescriptor.getIndexPattern(), + systemIndexDescriptor.getAliasName() + ); return value == null ? automaton : Operations.union(value, automaton); - }) - ); + })); } }); feature.getDataStreamDescriptors().forEach(dataStreamDescriptor -> { if (dataStreamDescriptor.isExternal()) { - dataStreamDescriptor.getAllowedElasticProductOrigins().forEach(origin -> - productToSystemIndicesMap.compute(origin, (key, value) -> { + dataStreamDescriptor.getAllowedElasticProductOrigins() + .forEach(origin -> productToSystemIndicesMap.compute(origin, (key, value) -> { Automaton automaton = SystemIndexDescriptor.buildAutomaton( - dataStreamDescriptor.getBackingIndexPattern(), dataStreamDescriptor.getDataStreamName()); + dataStreamDescriptor.getBackingIndexPattern(), + dataStreamDescriptor.getDataStreamName() + ); return value == null ? 
automaton : Operations.union(value, automaton); - }) - ); + })); } }); } - return productToSystemIndicesMap.entrySet().stream() - .collect(Collectors.toUnmodifiableMap(Entry::getKey, entry -> - new CharacterRunAutomaton(MinimizationOperations.minimize(entry.getValue(), Integer.MAX_VALUE)))); + return productToSystemIndicesMap.entrySet() + .stream() + .collect( + Collectors.toUnmodifiableMap( + Entry::getKey, + entry -> new CharacterRunAutomaton(MinimizationOperations.minimize(entry.getValue(), Integer.MAX_VALUE)) + ) + ); } /** @@ -235,10 +244,23 @@ public Automaton getSystemNameAutomaton() { return systemNameAutomaton; } + /** + * Checks whether an index is a net-new system index, meaning we can apply non-BWC behavior to it. + * @param indexName The index name to check. + * @return {@code true} if the given index is covered by a net-new system index descriptor, {@code false} otherwise. + */ public boolean isNetNewSystemIndex(String indexName) { return netNewSystemIndexAutomaton.run(indexName); } + /** + * Used to determine which executor should be used for operations on this index. See {@link ExecutorSelector} docs for + * details. + */ + public ExecutorSelector getExecutorSelector() { + return executorSelector; + } + /** * Finds a single matching {@link SystemIndexDescriptor}, if any, for the given index name. 
* @param name the name of the index @@ -246,7 +268,8 @@ public boolean isNetNewSystemIndex(String indexName) { * @throws IllegalStateException if multiple descriptors match the name */ public @Nullable SystemIndexDescriptor findMatchingDescriptor(String name) { - final List matchingDescriptors = featureDescriptors.values().stream() + final List matchingDescriptors = featureDescriptors.values() + .stream() .flatMap(feature -> feature.getIndexDescriptors().stream()) .filter(descriptor -> descriptor.matchesIndexPattern(name)) .collect(toUnmodifiableList()); @@ -257,13 +280,20 @@ public boolean isNetNewSystemIndex(String indexName) { return matchingDescriptors.get(0); } else { // This should be prevented by failing on overlapping patterns at startup time, but is here just in case. - StringBuilder errorMessage = new StringBuilder() - .append("index name [") + StringBuilder errorMessage = new StringBuilder().append("index name [") .append(name) .append("] is claimed as a system index by multiple system index patterns: [") - .append(matchingDescriptors.stream() - .map(descriptor -> "pattern: [" + descriptor.getIndexPattern() + - "], description: [" + descriptor.getDescription() + "]").collect(Collectors.joining("; "))); + .append( + matchingDescriptors.stream() + .map( + descriptor -> "pattern: [" + + descriptor.getIndexPattern() + + "], description: [" + + descriptor.getDescription() + + "]" + ) + .collect(Collectors.joining("; ")) + ); // Throw AssertionError if assertions are enabled, or a regular exception otherwise: assert false : errorMessage.toString(); throw new IllegalStateException(errorMessage.toString()); @@ -277,7 +307,8 @@ public boolean isNetNewSystemIndex(String indexName) { * @throws IllegalStateException if multiple descriptors match the name */ public @Nullable SystemDataStreamDescriptor findMatchingDataStreamDescriptor(String name) { - final List matchingDescriptors = featureDescriptors.values().stream() + final List matchingDescriptors = 
featureDescriptors.values() + .stream() .flatMap(feature -> feature.getDataStreamDescriptors().stream()) .filter(descriptor -> descriptor.getDataStreamName().equals(name)) .collect(toUnmodifiableList()); @@ -288,13 +319,20 @@ public boolean isNetNewSystemIndex(String indexName) { return matchingDescriptors.get(0); } else { // This should be prevented by failing on overlapping patterns at startup time, but is here just in case. - StringBuilder errorMessage = new StringBuilder() - .append("DataStream name [") + StringBuilder errorMessage = new StringBuilder().append("DataStream name [") .append(name) .append("] is claimed as a system data stream by multiple descriptors: [") - .append(matchingDescriptors.stream() - .map(descriptor -> "name: [" + descriptor.getDataStreamName() + - "], description: [" + descriptor.getDescription() + "]").collect(Collectors.joining("; "))); + .append( + matchingDescriptors.stream() + .map( + descriptor -> "name: [" + + descriptor.getDataStreamName() + + "], description: [" + + descriptor.getDescription() + + "]" + ) + .collect(Collectors.joining("; ")) + ); // Throw AssertionError if assertions are enabled, or a regular exception otherwise: assert false : errorMessage.toString(); throw new IllegalStateException(errorMessage.toString()); @@ -342,14 +380,13 @@ public Map getFeatures() { } private static Automaton buildIndexAutomaton(Map descriptors) { - Optional automaton = descriptors.values().stream() - .map(SystemIndices::featureToIndexAutomaton) - .reduce(Operations::union); + Optional automaton = descriptors.values().stream().map(SystemIndices::featureToIndexAutomaton).reduce(Operations::union); return MinimizationOperations.minimize(automaton.orElse(EMPTY), Integer.MAX_VALUE); } private static CharacterRunAutomaton buildNetNewIndexCharacterRunAutomaton(Map featureDescriptors) { - Optional automaton = featureDescriptors.values().stream() + Optional automaton = featureDescriptors.values() + .stream() .flatMap(feature -> 
feature.getIndexDescriptors().stream()) .filter(SystemIndexDescriptor::isNetNew) .map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getIndexPattern(), descriptor.getAliasName())) @@ -358,7 +395,8 @@ private static CharacterRunAutomaton buildNetNewIndexCharacterRunAutomaton(Map systemIndexAutomaton = feature.getIndexDescriptors().stream() + Optional systemIndexAutomaton = feature.getIndexDescriptors() + .stream() .map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getIndexPattern(), descriptor.getAliasName())) .reduce(Operations::union); @@ -366,7 +404,8 @@ private static Automaton featureToIndexAutomaton(Feature feature) { } private static Automaton buildDataStreamAutomaton(Map descriptors) { - Optional automaton = descriptors.values().stream() + Optional automaton = descriptors.values() + .stream() .flatMap(feature -> feature.getDataStreamDescriptors().stream()) .map(SystemDataStreamDescriptor::getDataStreamName) .map(dsName -> SystemIndexDescriptor.buildAutomaton(dsName, null)) @@ -381,25 +420,25 @@ private static Predicate buildDataStreamNamePredicate(Map descriptors) { - Optional automaton = descriptors.values().stream() + Optional automaton = descriptors.values() + .stream() .map(SystemIndices::featureToDataStreamBackingIndicesAutomaton) .reduce(Operations::union); return MinimizationOperations.minimize(automaton.orElse(EMPTY), Integer.MAX_VALUE); } private static Automaton featureToDataStreamBackingIndicesAutomaton(Feature feature) { - Optional systemDataStreamAutomaton = feature.getDataStreamDescriptors().stream() - .map(descriptor -> SystemIndexDescriptor.buildAutomaton( - descriptor.getBackingIndexPattern(), - null - )) + Optional systemDataStreamAutomaton = feature.getDataStreamDescriptors() + .stream() + .map(descriptor -> SystemIndexDescriptor.buildAutomaton(descriptor.getBackingIndexPattern(), null)) .reduce(Operations::union); return systemDataStreamAutomaton.orElse(EMPTY); } public SystemDataStreamDescriptor 
validateDataStreamAccess(String dataStreamName, ThreadContext threadContext) { if (systemDataStreamPredicate.test(dataStreamName)) { - SystemDataStreamDescriptor dataStreamDescriptor = featureDescriptors.values().stream() + SystemDataStreamDescriptor dataStreamDescriptor = featureDescriptors.values() + .stream() .flatMap(feature -> feature.getDataStreamDescriptors().stream()) .filter(descriptor -> descriptor.getDataStreamName().equals(dataStreamName)) .findFirst() @@ -413,7 +452,8 @@ public SystemDataStreamDescriptor validateDataStreamAccess(String dataStreamName if (getProductSystemIndexNamePredicate(threadContext).test(dataStreamName) == false) { throw dataStreamAccessException( threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY), - dataStreamName); + dataStreamName + ); } else { return dataStreamDescriptor; } @@ -439,21 +479,25 @@ public IllegalArgumentException dataStreamAccessException(ThreadContext threadCo public IllegalArgumentException netNewSystemIndexAccessException(ThreadContext threadContext, Collection names) { final String product = threadContext.getHeader(EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY); if (product == null) { - return new IllegalArgumentException("Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + - " use and access is reserved for system operations"); + return new IllegalArgumentException( + "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " use and access is reserved for system operations" + ); } else { - return new IllegalArgumentException("Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + - " use and access is reserved for system operations"); + return new IllegalArgumentException( + "Indices " + Arrays.toString(names.toArray(Strings.EMPTY_ARRAY)) + " use and access is reserved for system operations" + ); } } IllegalArgumentException dataStreamAccessException(@Nullable String product, String... 
dataStreamNames) { if (product == null) { - return new IllegalArgumentException("Data stream(s) " + Arrays.toString(dataStreamNames) + - " use and access is reserved for system operations"); + return new IllegalArgumentException( + "Data stream(s) " + Arrays.toString(dataStreamNames) + " use and access is reserved for system operations" + ); } else { - return new IllegalArgumentException("Data stream(s) " + Arrays.toString(dataStreamNames) + " may not be accessed by product [" - + product + "]"); + return new IllegalArgumentException( + "Data stream(s) " + Arrays.toString(dataStreamNames) + " may not be accessed by product [" + product + "]" + ); } } @@ -505,16 +549,18 @@ public enum SystemIndexAccessLevel { * @throws IllegalStateException Thrown if any of the index patterns overlaps with another. */ static void checkForOverlappingPatterns(Map sourceToFeature) { - List> sourceDescriptorPair = sourceToFeature.entrySet().stream() + List> sourceDescriptorPair = sourceToFeature.entrySet() + .stream() .flatMap(entry -> entry.getValue().getIndexDescriptors().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) .sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message .collect(Collectors.toUnmodifiableList()); - List> sourceDataStreamDescriptorPair = sourceToFeature.entrySet().stream() + List> sourceDataStreamDescriptorPair = sourceToFeature.entrySet() + .stream() .filter(entry -> entry.getValue().getDataStreamDescriptors().isEmpty() == false) - .flatMap(entry -> - entry.getValue().getDataStreamDescriptors().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) - .sorted( - Comparator.comparing(d -> d.v1() + ":" + d.v2().getDataStreamName())) // Consistent ordering -> consistent error message + .flatMap( + entry -> entry.getValue().getDataStreamDescriptors().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor)) + ) + .sorted(Comparator.comparing(d -> d.v1() + 
":" + d.v2().getDataStreamName())) // Consistent ordering -> consistent error message .collect(Collectors.toUnmodifiableList()); // This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the @@ -523,27 +569,41 @@ static void checkForOverlappingPatterns(Map sourceToFeature) { sourceDescriptorPair.forEach(descriptorToCheck -> { List> descriptorsMatchingThisPattern = sourceDescriptorPair.stream() .filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked - .filter(d -> overlaps(descriptorToCheck.v2(), d.v2()) || - (d.v2().getAliasName() != null && descriptorToCheck.v2().matchesIndexPattern(d.v2().getAliasName()))) + .filter( + d -> overlaps(descriptorToCheck.v2(), d.v2()) + || (d.v2().getAliasName() != null && descriptorToCheck.v2().matchesIndexPattern(d.v2().getAliasName())) + ) .collect(toUnmodifiableList()); if (descriptorsMatchingThisPattern.isEmpty() == false) { - throw new IllegalStateException("a system index descriptor [" + descriptorToCheck.v2() + "] from [" + - descriptorToCheck.v1() + "] overlaps with other system index descriptors: [" + - descriptorsMatchingThisPattern.stream() - .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") - .collect(Collectors.joining(", "))); + throw new IllegalStateException( + "a system index descriptor [" + + descriptorToCheck.v2() + + "] from [" + + descriptorToCheck.v1() + + "] overlaps with other system index descriptors: [" + + descriptorsMatchingThisPattern.stream() + .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") + .collect(Collectors.joining(", ")) + ); } List> dataStreamsMatching = sourceDataStreamDescriptorPair.stream() - .filter(dsTuple -> descriptorToCheck.v2().matchesIndexPattern(dsTuple.v2().getDataStreamName()) || - overlaps(descriptorToCheck.v2().getIndexPattern(), dsTuple.v2().getBackingIndexPattern())) + .filter( + dsTuple -> 
descriptorToCheck.v2().matchesIndexPattern(dsTuple.v2().getDataStreamName()) + || overlaps(descriptorToCheck.v2().getIndexPattern(), dsTuple.v2().getBackingIndexPattern()) + ) .collect(toUnmodifiableList()); if (dataStreamsMatching.isEmpty() == false) { - throw new IllegalStateException("a system index descriptor [" + descriptorToCheck.v2() + "] from [" + - descriptorToCheck.v1() + "] overlaps with one or more data stream descriptors: [" + - dataStreamsMatching.stream() - .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") - .collect(Collectors.joining(", "))); + throw new IllegalStateException( + "a system index descriptor [" + + descriptorToCheck.v2() + + "] from [" + + descriptorToCheck.v1() + + "] overlaps with one or more data stream descriptors: [" + + dataStreamsMatching.stream() + .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") + .collect(Collectors.joining(", ")) + ); } }); } @@ -564,17 +624,16 @@ private static Map buildSystemIndexDescriptorMap(Map { if (map.putIfAbsent(source, feature) != null) { - throw new IllegalArgumentException("plugin or module attempted to define the same source [" + source + - "] as a built-in system index"); + throw new IllegalArgumentException( + "plugin or module attempted to define the same source [" + source + "] as a built-in system index" + ); } }); return Map.copyOf(map); } Collection getSystemIndexDescriptors() { - return this.featureDescriptors.values().stream() - .flatMap(f -> f.getIndexDescriptors().stream()) - .collect(Collectors.toList()); + return this.featureDescriptors.values().stream().flatMap(f -> f.getIndexDescriptors().stream()).collect(Collectors.toList()); } /** @@ -584,8 +643,13 @@ Collection getSystemIndexDescriptors() { */ public static void validateFeatureName(String name, String plugin) { if (SnapshotsService.NO_FEATURE_STATES_VALUE.equalsIgnoreCase(name)) { - throw new IllegalArgumentException("feature name cannot be reserved name [\"" + 
SnapshotsService.NO_FEATURE_STATES_VALUE + - "\"], but was for plugin [" + plugin + "]"); + throw new IllegalArgumentException( + "feature name cannot be reserved name [\"" + + SnapshotsService.NO_FEATURE_STATES_VALUE + + "\"], but was for plugin [" + + plugin + + "]" + ); } } @@ -593,6 +657,7 @@ public static void validateFeatureName(String name, String plugin) { * Class holding a description of a stateful feature. */ public static class Feature { + private final String name; private final String description; private final Collection indexDescriptors; private final Collection dataStreamDescriptors; @@ -603,6 +668,7 @@ public static class Feature { /** * Construct a Feature with a custom cleanup function + * @param name The name of the feature * @param description Description of the feature * @param indexDescriptors Collection of objects describing system indices for this feature * @param dataStreamDescriptors Collection of objects describing system data streams for this feature @@ -612,6 +678,7 @@ public static class Feature { * @param postMigrationFunction A function that will be called after upgrading all of this plugin's system indices */ public Feature( + String name, String description, Collection indexDescriptors, Collection dataStreamDescriptors, @@ -620,6 +687,7 @@ public Feature( MigrationPreparationHandler preMigrationFunction, MigrationCompletionHandler postMigrationFunction ) { + this.name = name; this.description = description; this.indexDescriptors = indexDescriptors; this.dataStreamDescriptors = dataStreamDescriptors; @@ -637,6 +705,7 @@ public Feature( */ public Feature(String name, String description, Collection indexDescriptors) { this( + name, description, indexDescriptors, Collections.emptyList(), @@ -668,6 +737,7 @@ public Feature( Collection dataStreamDescriptors ) { this( + name, description, indexDescriptors, dataStreamDescriptors, @@ -693,6 +763,7 @@ public Feature( */ public static Feature fromSystemIndexPlugin(SystemIndexPlugin plugin, 
Settings settings) { return new Feature( + plugin.getFeatureName(), plugin.getFeatureDescription(), plugin.getSystemIndexDescriptors(settings), plugin.getSystemDataStreamDescriptors(), @@ -723,6 +794,10 @@ public TriConsumer> listener); } + /** + * Type for the handler that's invoked when all of a feature's system indices have been migrated. + * See {@link SystemIndexPlugin#indicesMigrationComplete(Map, ClusterService, Client, ActionListener)}. + */ @FunctionalInterface - interface MigrationCompletionHandler { + public interface MigrationCompletionHandler { void indicesMigrationComplete( Map preUpgradeMetadata, ClusterService clusterService, @@ -809,10 +892,18 @@ void indicesMigrationComplete( ActionListener listener ); } - } - public ExecutorSelector getExecutorSelector() { - return executorSelector; + public static Feature pluginToFeature(SystemIndexPlugin plugin, Settings settings) { + return new Feature( + plugin.getFeatureName(), + plugin.getFeatureDescription(), + plugin.getSystemIndexDescriptors(settings), + plugin.getSystemDataStreamDescriptors(), + plugin.getAssociatedIndexDescriptors(), + plugin::cleanUpFeature, + plugin::prepareForIndicesMigration, + plugin::indicesMigrationComplete + ); + } } - } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 406268bad8c61..faacf6592796f 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -46,6 +46,7 @@ import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; import org.elasticsearch.cluster.metadata.MetadataDataStreamsService; +import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; import org.elasticsearch.cluster.metadata.SystemIndexMetadataUpgradeService; import org.elasticsearch.cluster.metadata.TemplateUpgradeService; import 
org.elasticsearch.cluster.node.DiscoveryNode; @@ -171,6 +172,7 @@ import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.upgrades.SystemIndexMigrationExecutor; import org.elasticsearch.usage.UsageService; import org.elasticsearch.watcher.ResourceWatcherService; @@ -429,7 +431,8 @@ protected Node(final Environment initialEnvironment, searchModule.getNamedWriteables().stream(), pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.getNamedWriteables().stream()), - ClusterModule.getNamedWriteables().stream()) + ClusterModule.getNamedWriteables().stream(), + SystemIndexMigrationExecutor.getNamedWriteables().stream()) .flatMap(Function.identity()).collect(Collectors.toList()); final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); NamedXContentRegistry xContentRegistry = new NamedXContentRegistry(Stream.of( @@ -566,6 +569,15 @@ protected Node(final Environment initialEnvironment, new MetadataCreateDataStreamService(threadPool, clusterService, metadataCreateIndexService); final MetadataDataStreamsService metadataDataStreamsService = new MetadataDataStreamsService(clusterService, indicesService); + final MetadataUpdateSettingsService metadataUpdateSettingsService = new MetadataUpdateSettingsService( + clusterService, + clusterModule.getAllocationService(), + settingsModule.getIndexScopedSettings(), + indicesService, + shardLimitValidator, + threadPool + ); + Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptService, xContentRegistry, environment, nodeEnvironment, @@ -651,14 +663,31 @@ protected Node(final Environment initialEnvironment, threadPool, scriptService, bigArrays, searchModule.getFetchPhase(), responseCollectorService, circuitBreakerService, 
executorSelector); - final List> tasksExecutors = pluginsService - .filterPlugins(PersistentTaskPlugin.class).stream() - .map(p -> p.getPersistentTasksExecutor(clusterService, threadPool, client, settingsModule, - clusterModule.getIndexNameExpressionResolver())) + final SystemIndexMigrationExecutor systemIndexMigrationExecutor = new SystemIndexMigrationExecutor( + client, + clusterService, + systemIndices, + metadataUpdateSettingsService, + metadataCreateIndexService, + settingsModule.getIndexScopedSettings()); + final List> builtinTaskExecutors = Arrays.asList(systemIndexMigrationExecutor); + final List> pluginTaskExectors = pluginsService.filterPlugins(PersistentTaskPlugin.class) + .stream() + .map( + p -> p.getPersistentTasksExecutor( + clusterService, + threadPool, + client, + settingsModule, + clusterModule.getIndexNameExpressionResolver() + ) + ) .flatMap(List::stream) .collect(toList()); - - final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(tasksExecutors); + final List> allTasksExectors = Stream.of(pluginTaskExectors, builtinTaskExecutors) + .flatMap(List::stream) + .collect(toList()); + final PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(allTasksExectors); final PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService(settings, registry, clusterService, threadPool); resourcesToClose.add(persistentTasksClusterService); @@ -697,6 +726,7 @@ protected Node(final Environment initialEnvironment, b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(MetadataCreateDataStreamService.class).toInstance(metadataCreateDataStreamService); b.bind(MetadataDataStreamsService.class).toInstance(metadataDataStreamsService); + b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); b.bind(SearchTransportService.class).toInstance(searchTransportService); 
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::aggReduceContextBuilder)); diff --git a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java new file mode 100644 index 0000000000000..dc89e88605d4b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; +import java.util.stream.Collectors; + +/** + * Holds the results of the most recent attempt to migrate system indices. Updated by {@link SystemIndexMigrator} as it finishes each + * feature, or fails. 
+ */ +public class FeatureMigrationResults implements Metadata.Custom { + public static final String TYPE = "system_index_migration"; + private static final Version MIGRATION_ADDED_VERSION = Version.V_8_0_0; + + private static final ParseField RESULTS_FIELD = new ParseField("results"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(TYPE, a -> { + final Map statuses = ((List>) a[0]).stream() + .collect(Collectors.toMap(Tuple::v1, Tuple::v2)); + return new FeatureMigrationResults(statuses); + }); + + static { + PARSER.declareNamedObjects( + ConstructingObjectParser.constructorArg(), + (p, c, n) -> new Tuple<>(n, SingleFeatureMigrationResult.fromXContent(p)), + v -> { throw new IllegalArgumentException("ordered " + RESULTS_FIELD.getPreferredName() + " are not supported"); }, + RESULTS_FIELD + ); + } + + private final Map featureStatuses; + + public FeatureMigrationResults(Map featureStatuses) { + this.featureStatuses = Objects.requireNonNullElse(featureStatuses, new HashMap<>()); + } + + public FeatureMigrationResults(StreamInput in) throws IOException { + this.featureStatuses = in.readMap(StreamInput::readString, SingleFeatureMigrationResult::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeMap( + featureStatuses, + (StreamOutput outStream, String featureName) -> outStream.writeString(featureName), + (StreamOutput outStream, SingleFeatureMigrationResult featureStatus) -> featureStatus.writeTo(outStream) + ); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(RESULTS_FIELD.getPreferredName(), featureStatuses); + return builder; + } + + public static FeatureMigrationResults fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Gets a map of feature name to that feature's status. 
Only contains features which have either been migrated successfully or + * failed to migrate. + * @return An unmodifiable map of feature names to migration statuses. + */ + public Map getFeatureStatuses() { + return Collections.unmodifiableMap(featureStatuses); + } + + /** + * Convenience method for updating the results of a migration run. Produces a new {@link FeatureMigrationResults} updated with the + * given status for the given feature name. + * @param featureName The feature name to update. If this feature name is already present, its status will be overwritten. + * @param status The status that should be associated with the given {@code featureName}. + * @return A new {@link FeatureMigrationResults} with the given status associated with the given feature name. Other entries in the + * map are unchanged. + */ + public FeatureMigrationResults withResult(String featureName, SingleFeatureMigrationResult status) { + Map newMap = new HashMap<>(featureStatuses); + newMap.put(featureName, status); + return new FeatureMigrationResults(newMap); + } + + @Override + public EnumSet context() { + return Metadata.ALL_CONTEXTS; + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public Version getMinimalSupportedVersion() { + return MIGRATION_ADDED_VERSION; + } + + @Override + public String toString() { + return "SystemIndexMigrationResult{" + "featureStatuses=" + featureStatuses + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if ((o instanceof FeatureMigrationResults) == false) return false; + FeatureMigrationResults that = (FeatureMigrationResults) o; + return featureStatuses.equals(that.featureStatuses); + } + + @Override + public int hashCode() { + return Objects.hash(featureStatuses); + } + + @Override + public Diff diff(Metadata.Custom previousState) { + return new ResultsDiff((FeatureMigrationResults) previousState, this); + } + + public static NamedDiff readDiffFrom(StreamInput in) 
throws IOException { + return new ResultsDiff(in); + } + + public static class ResultsDiff implements NamedDiff { + private final Diff> resultsDiff; + + public ResultsDiff(FeatureMigrationResults before, FeatureMigrationResults after) { + this.resultsDiff = DiffableUtils.diff(before.featureStatuses, after.featureStatuses, DiffableUtils.getStringKeySerializer()); + } + + public ResultsDiff(StreamInput in) throws IOException { + this.resultsDiff = DiffableUtils.readJdkMapDiff( + in, + DiffableUtils.getStringKeySerializer(), + SingleFeatureMigrationResult::new, + ResultsDiff::readDiffFrom + ); + } + + @Override + public Metadata.Custom apply(Metadata.Custom part) { + TreeMap newResults = new TreeMap<>( + resultsDiff.apply(((FeatureMigrationResults) part).featureStatuses) + ); + return new FeatureMigrationResults(newResults); + } + + @Override + public String getWriteableName() { + return TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + resultsDiff.writeTo(out); + } + + static Diff readDiffFrom(StreamInput in) throws IOException { + return AbstractDiffable.readDiffFrom(SingleFeatureMigrationResult::new, in); + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java new file mode 100644 index 0000000000000..8fb3278e2b7b2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java @@ -0,0 +1,100 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; + +import java.util.HashMap; + +/** + * Handles updating the {@link FeatureMigrationResults} in the cluster state. + */ +public class MigrationResultsUpdateTask extends ClusterStateUpdateTask { + private static final Logger logger = LogManager.getLogger(MigrationResultsUpdateTask.class); + + private final String featureName; + private final SingleFeatureMigrationResult status; + private final ActionListener listener; + + private MigrationResultsUpdateTask(String featureName, SingleFeatureMigrationResult status, ActionListener listener) { + this.featureName = featureName; + this.status = status; + this.listener = listener; + } + + /** + * Creates a task that will update the status of a feature migration. + * @param featureName The name of the feature whose status should be updated. + * @param status The status to be associated with the given feature. + * @param listener A listener that will be called upon successfully updating the cluster state. + */ + public static MigrationResultsUpdateTask upsert( + String featureName, + SingleFeatureMigrationResult status, + ActionListener listener + ) { + return new MigrationResultsUpdateTask(featureName, status, listener); + } + + /** + * Submit the update task so that it will actually be executed. + * @param clusterService The cluster service to which this task should be submitted. + */ + public void submit(ClusterService clusterService) { + String source = new ParameterizedMessage("record [{}] migration [{}]", featureName, status.succeeded() ? 
"success" : "failure") + .getFormattedMessage(); + clusterService.submitStateUpdateTask(source, this); + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + FeatureMigrationResults currentResults = currentState.metadata().custom(FeatureMigrationResults.TYPE); + if (currentResults == null) { + currentResults = new FeatureMigrationResults(new HashMap<>()); + } + FeatureMigrationResults newResults = currentResults.withResult(featureName, status); + final Metadata newMetadata = Metadata.builder(currentState.metadata()) + .putCustom(FeatureMigrationResults.TYPE, newResults) + .build(); + final ClusterState newState = ClusterState.builder(currentState).metadata(newMetadata).build(); + return newState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(newState); + } + + @Override + public void onFailure(String source, Exception clusterStateUpdateException) { + if (status.succeeded()) { + logger.warn( + new ParameterizedMessage("failed to update cluster state after successful migration of feature [{}]", featureName), + clusterStateUpdateException + ); + } else { + logger.error( + new ParameterizedMessage( + "failed to update cluster state after failed migration of feature [{}] on index [{}]", + featureName, + status.getFailedIndexName() + ).getFormattedMessage(), + clusterStateUpdateException + ); + } + listener.onFailure(clusterStateUpdateException); + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java new file mode 100644 index 0000000000000..0c03e1426e1fe --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SingleFeatureMigrationResult.java @@ -0,0 +1,166 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Holds the results of migrating a single feature. See also {@link FeatureMigrationResults}. + */ +public class SingleFeatureMigrationResult extends AbstractDiffable implements Writeable, ToXContent { + private static final String NAME = "feature_migration_status"; + private static final ParseField SUCCESS_FIELD = new ParseField("successful"); + private static final ParseField FAILED_INDEX_NAME_FIELD = new ParseField("failed_index"); + private static final ParseField EXCEPTION_FIELD = new ParseField("exception"); + + private final boolean successful; + @Nullable + private final String failedIndexName; + @Nullable + private final Exception exception; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + a -> new SingleFeatureMigrationResult((boolean) a[0], (String) a[1], (Exception) a[2]) + ); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), SUCCESS_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_INDEX_NAME_FIELD); + 
PARSER.declareObject( + ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + EXCEPTION_FIELD + ); + } + + private SingleFeatureMigrationResult(boolean successful, String failedIndexName, Exception exception) { + this.successful = successful; + if (successful == false) { + Objects.requireNonNull(failedIndexName, "failed index name must be present for failed feature migration statuses"); + Objects.requireNonNull(exception, "exception details must be present for failed feature migration statuses"); + } + this.failedIndexName = failedIndexName; + this.exception = exception; + } + + SingleFeatureMigrationResult(StreamInput in) throws IOException { + this.successful = in.readBoolean(); + if (this.successful == false) { + this.failedIndexName = in.readString(); + this.exception = in.readException(); + } else { + this.failedIndexName = null; + this.exception = null; + } + } + + public static SingleFeatureMigrationResult fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Creates a record indicating that migration succeeded. + */ + public static SingleFeatureMigrationResult success() { + return new SingleFeatureMigrationResult(true, null, null); + } + + /** + * Creates a record indicating that migration failed. + * @param failedIndexName The name of the specific index whose migration failed. + * @param exception The exception which caused the migration failure. + */ + public static SingleFeatureMigrationResult failure(String failedIndexName, Exception exception) { + return new SingleFeatureMigrationResult(false, failedIndexName, exception); + } + + /** + * Returns {@code true} if the migration of this feature's data succeeded, or {@code false} otherwise. + */ + public boolean succeeded() { + return successful; + } + + /** + * Gets the name of the specific index where the migration failure occurred, if the migration failed. 
+ */ + @Nullable + public String getFailedIndexName() { + return failedIndexName; + } + + /** + * Gets the exception that caused the migration failure, if the migration failed. + */ + @Nullable + public Exception getException() { + return exception; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(this.successful); + if (this.successful == false) { + out.writeString(this.failedIndexName); + out.writeException(this.exception); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(SUCCESS_FIELD.getPreferredName(), successful); + if (successful == false) { + builder.field(FAILED_INDEX_NAME_FIELD.getPreferredName(), failedIndexName); + builder.startObject(EXCEPTION_FIELD.getPreferredName()); + { + ElasticsearchException.generateThrowableXContent(builder, params, exception); + } + builder.endObject(); + } + } + builder.endObject(); + return builder; + } + + @Override + public boolean isFragment() { + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if ((o instanceof SingleFeatureMigrationResult) == false) return false; + SingleFeatureMigrationResult that = (SingleFeatureMigrationResult) o; + // Exception is intentionally not checked here + return successful == that.successful && Objects.equals(failedIndexName, that.failedIndexName); + } + + @Override + public int hashCode() { + return Objects.hash(successful, failedIndexName); + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java new file mode 100644 index 0000000000000..bf58eb029ffc0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V.
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; + +/** + * Starts the process of migrating system indices. See {@link SystemIndexMigrator} for the actual migration logic. + */ +public class SystemIndexMigrationExecutor extends PersistentTasksExecutor { + private final Client client; // NOTE: *NOT* an OriginSettingClient. We have to do that later. 
+ private final ClusterService clusterService; + private final SystemIndices systemIndices; + private final MetadataUpdateSettingsService metadataUpdateSettingsService; + private final MetadataCreateIndexService metadataCreateIndexService; + private final IndexScopedSettings indexScopedSettings; + + public SystemIndexMigrationExecutor( + Client client, + ClusterService clusterService, + SystemIndices systemIndices, + MetadataUpdateSettingsService metadataUpdateSettingsService, + MetadataCreateIndexService metadataCreateIndexService, + IndexScopedSettings indexScopedSettings) { + super(SYSTEM_INDEX_UPGRADE_TASK_NAME, ThreadPool.Names.GENERIC); + this.client = client; + this.clusterService = clusterService; + this.systemIndices = systemIndices; + this.metadataUpdateSettingsService = metadataUpdateSettingsService; + this.metadataCreateIndexService = metadataCreateIndexService; + this.indexScopedSettings = indexScopedSettings; + } + + @Override + protected void nodeOperation(AllocatedPersistentTask task, SystemIndexMigrationTaskParams params, PersistentTaskState state) { + SystemIndexMigrator upgrader = (SystemIndexMigrator) task; + SystemIndexMigrationTaskState upgraderState = (SystemIndexMigrationTaskState) state; + upgrader.run(upgraderState); + } + + @Override + protected AllocatedPersistentTask createTask( + long id, + String type, + String action, + TaskId parentTaskId, + PersistentTasksCustomMetadata.PersistentTask taskInProgress, + Map headers + ) { + return new SystemIndexMigrator( + client, + id, + type, + action, + parentTaskId, + taskInProgress.getParams(), + headers, + clusterService, + systemIndices, + metadataUpdateSettingsService, + metadataCreateIndexService, + indexScopedSettings); + } + + @Override + public PersistentTasksCustomMetadata.Assignment getAssignment( + SystemIndexMigrationTaskParams params, + Collection candidateNodes, + ClusterState clusterState + ) { + // This should select from master-eligible nodes because we already require all 
master-eligible nodes to have all plugins installed. + // However, due to a misunderstanding, this code as-written needs to run on the master node in particular. This is not a fundamental + // problem, but more that you can't submit cluster state update tasks from non-master nodes. If we translate the process of updating + // the cluster state to a Transport action, we can revert this to selecting any master-eligible node. + DiscoveryNode discoveryNode = clusterState.nodes().getMasterNode(); + if (discoveryNode == null) { + return NO_NODE_FOUND; + } else { + return new PersistentTasksCustomMetadata.Assignment(discoveryNode.getId(), ""); + } + } + + public static List getNamedWriteables() { + return List.of(new NamedWriteableRegistry.Entry( + PersistentTaskState.class, + SYSTEM_INDEX_UPGRADE_TASK_NAME, + SystemIndexMigrationTaskState::new + ), + new NamedWriteableRegistry.Entry( + PersistentTaskParams.class, + SYSTEM_INDEX_UPGRADE_TASK_NAME, + SystemIndexMigrationTaskParams::new + )); + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationInfo.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationInfo.java new file mode 100644 index 0000000000000..0892fa0d0bd01 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationInfo.java @@ -0,0 +1,284 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.OriginSettingClient; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.plugins.SystemIndexPlugin; + +import java.util.Comparator; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Stream; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.State.CLOSE; + +/** + * Holds the data required to migrate a single system index, including metadata from the current index. If necessary, computes the settings + * and mappings for the "next" index based off of the current one. 
+ */ +class SystemIndexMigrationInfo implements Comparable { + private static final Logger logger = LogManager.getLogger(SystemIndexMigrationInfo.class); + + private final IndexMetadata currentIndex; + private final String featureName; + private final Settings settings; + private final String mapping; + private final String origin; + private final SystemIndices.Feature owningFeature; + + private static final Comparator SAME_CLASS_COMPARATOR = Comparator.comparing( + SystemIndexMigrationInfo::getFeatureName + ).thenComparing(SystemIndexMigrationInfo::getCurrentIndexName); + + private SystemIndexMigrationInfo( + IndexMetadata currentIndex, + String featureName, + Settings settings, + String mapping, + String origin, + SystemIndices.Feature owningFeature + ) { + this.currentIndex = currentIndex; + this.featureName = featureName; + this.settings = settings; + this.mapping = mapping; + this.origin = origin; + this.owningFeature = owningFeature; + } + + /** + * Gets the name of the index to be migrated. + */ + String getCurrentIndexName() { + return currentIndex.getIndex().getName(); + } + + /** + * Indicates if the index to be migrated is closed. + */ + boolean isCurrentIndexClosed() { + return CLOSE.equals(currentIndex.getState()); + } + + /** + * Gets the name to be used for the post-migration index. + */ + String getNextIndexName() { + return currentIndex.getIndex().getName() + SystemIndices.UPGRADED_INDEX_SUFFIX; + } + + /** + * Gets the name of the feature which owns the index to be migrated. + */ + String getFeatureName() { + return featureName; + } + + /** + * Gets the mappings to be used for the post-migration index. + */ + String getMappings() { + return mapping; + } + + /** + * Gets the settings to be used for the post-migration index. + */ + Settings getSettings() { + return settings; + } + + /** + * Gets the origin that should be used when interacting with this index. 
+ */ + String getOrigin() { + return origin; + } + + /** + * Invokes the pre-migration hook for the feature that owns this index. + * See {@link SystemIndexPlugin#prepareForIndicesMigration(ClusterService, Client, ActionListener)}. + * @param clusterService For retrieving the state. + * @param client For performing any update operations necessary to prepare for the upgrade. + * @param listener Call {@link ActionListener#onResponse(Object)} when preparation for migration is complete. + */ + void prepareForIndicesMigration( + ClusterService clusterService, + Client client, + ActionListener> listener) { + owningFeature.getPreMigrationFunction().prepareForIndicesMigration(clusterService, client, listener); + } + + /** + * Invokes the post-migration hooks for the feature that owns this index. + * See {@link SystemIndexPlugin#indicesMigrationComplete(Map, ClusterService, Client, ActionListener)}. + * @param metadata The metadata that was passed into the listener by the pre-migration hook. + * @param clusterService For retrieving the state. + * @param client For performing any update operations necessary to prepare for the upgrade. + * @param listener Call {@link ActionListener#onResponse(Object)} when the hook is finished. + */ + void indicesMigrationComplete( + Map metadata, + ClusterService clusterService, + Client client, + ActionListener listener + ) { + owningFeature.getPostMigrationFunction().indicesMigrationComplete(metadata, clusterService, client, listener); + } + + /** + * Creates a client that's been configured to be able to properly access the system index to be migrated. + * @param baseClient The base client to wrap. + * @return An {@link OriginSettingClient} which uses the origin provided by {@link SystemIndexMigrationInfo#getOrigin()}. 
+ */ + Client createClient(Client baseClient) { + return new OriginSettingClient(baseClient, this.getOrigin()); + } + + @Override + public int compareTo(SystemIndexMigrationInfo o) { + return SAME_CLASS_COMPARATOR.compare(this, o); + } + + @Override + public String toString() { + return "IndexUpgradeInfo[" + + "currentIndex='" + + currentIndex.getIndex().getName() + + "\'" + + ", featureName='" + + featureName + + '\'' + + ", settings=" + + settings + + ", mapping='" + + mapping + + '\'' + + ", origin='" + + origin + + '\''; + } + + static SystemIndexMigrationInfo build( + IndexMetadata currentIndex, + SystemIndexDescriptor descriptor, + SystemIndices.Feature feature, + IndexScopedSettings indexScopedSettings + ) { + Settings.Builder settingsBuilder = Settings.builder(); + if (descriptor.getSettings() != null) { + settingsBuilder.put(descriptor.getSettings()); + settingsBuilder.remove("index.version.created"); // Simplifies testing, should never impact real uses. + } + Settings settings = settingsBuilder.build(); + + String mapping = descriptor.getMappings(); + if (descriptor.isAutomaticallyManaged() == false) { + // Get Settings from old index + settings = copySettingsForNewIndex(currentIndex.getSettings(), indexScopedSettings); + + // Copy mapping from the old index + mapping = currentIndex.mapping().source().string(); + } + return new SystemIndexMigrationInfo( + currentIndex, + feature.getName(), + settings, + mapping, + descriptor.getOrigin(), + feature); + } + + private static Settings copySettingsForNewIndex(Settings currentIndexSettings, IndexScopedSettings indexScopedSettings) { + Settings.Builder newIndexSettings = Settings.builder(); + currentIndexSettings.keySet() + .stream() + .filter(settingKey -> indexScopedSettings.isPrivateSetting(settingKey) == false) + .map(indexScopedSettings::get) + .filter(Objects::nonNull) + .filter(setting -> setting.getProperties().contains(Setting.Property.NotCopyableOnResize) == false) + .filter(setting -> 
setting.getProperties().contains(Setting.Property.PrivateIndex) == false) + .forEach(setting -> { newIndexSettings.put(setting.getKey(), currentIndexSettings.get(setting.getKey())); }); + return newIndexSettings.build(); + } + + /** + * Convenience factory method holding the logic for creating instances from a Feature object. + * @param feature The feature whose indices should be migrated. + * @param metadata The current metadata, as index migration depends on the current state of the cluster. + * @param indexScopedSettings This is necessary to make adjustments to the indices settings for unmanaged indices. + * @return A {@link Stream} of {@link SystemIndexMigrationInfo}s that represent all the indices the given feature currently owns. + */ + static Stream fromFeature( + SystemIndices.Feature feature, + Metadata metadata, + IndexScopedSettings indexScopedSettings + ) { + return feature.getIndexDescriptors() + .stream() + .flatMap(descriptor -> descriptor.getMatchingIndices(metadata).stream().map(metadata::index).filter(imd -> { + assert imd != null : "got null IndexMetadata for index in system index descriptor [" + descriptor.getIndexPattern() + "]"; + return Objects.nonNull(imd); + }).map(imd -> SystemIndexMigrationInfo.build(imd, descriptor, feature, indexScopedSettings))); + } + + static SystemIndexMigrationInfo fromTaskState( + SystemIndexMigrationTaskState taskState, + SystemIndices systemIndices, + Metadata metadata, + IndexScopedSettings indexScopedSettings + ) { + SystemIndexDescriptor descriptor = systemIndices.findMatchingDescriptor(taskState.getCurrentIndex()); + SystemIndices.Feature feature = systemIndices.getFeatures().get(taskState.getCurrentFeature()); + IndexMetadata imd = metadata.index(taskState.getCurrentIndex()); + + // It's possible for one or both of these to happen if the executing node fails during execution and: + // 1. The task gets assigned to a node with a different set of plugins installed. + // 2. The index in question is somehow deleted before we got to it.
+ // The first case shouldn't happen, master nodes must have all `SystemIndexPlugins` installed. + // In the second case, we should just start over. + if (descriptor == null) { + String errorMsg = new ParameterizedMessage( + "couldn't find system index descriptor for index [{}] from feature [{}], which likely means this node is missing a plugin", + taskState.getCurrentIndex(), + taskState.getCurrentFeature() + ).toString(); + logger.warn(errorMsg); + assert false : errorMsg; + throw new IllegalStateException(errorMsg); + } + + if (imd == null) { + String errorMsg = new ParameterizedMessage( + "couldn't find index [{}] from feature [{}] with descriptor pattern [{}]", + taskState.getCurrentIndex(), + taskState.getCurrentFeature(), + descriptor.getIndexPattern() + ).toString(); + logger.warn(errorMsg); + assert false : errorMsg; + throw new IllegalStateException(errorMsg); + } + + return build(imd, descriptor, feature, indexScopedSettings); + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParams.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParams.java new file mode 100644 index 0000000000000..3f63b29c64a79 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParams.java @@ -0,0 +1,75 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * The params used to initialize {@link SystemIndexMigrator} when it's initially kicked off. + * + * Currently doesn't do anything. In the future, a name of a feature could be used to indicate that only a specific feature should be + * migrated. + */ +public class SystemIndexMigrationTaskParams implements PersistentTaskParams { + + public static final String SYSTEM_INDEX_UPGRADE_TASK_NAME = "upgrade-system-indices"; + public static final ObjectParser PARSER = new ObjectParser<>( + SYSTEM_INDEX_UPGRADE_TASK_NAME, + true, + SystemIndexMigrationTaskParams::new + ); + static { + // PARSER.declareString etc... 
+ } + + public SystemIndexMigrationTaskParams() { + // This space intentionally left blank + } + + public SystemIndexMigrationTaskParams(StreamInput in) { + // This space intentionally left blank + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + // This space intentionally left blank + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return SYSTEM_INDEX_UPGRADE_TASK_NAME; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_8_0_0; + } + + @Override public boolean equals(Object obj) { + return obj instanceof SystemIndexMigrationTaskParams; + } + + @Override public int hashCode() { + return 0; + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java new file mode 100644 index 0000000000000..06a5acedc61cc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskState.java @@ -0,0 +1,133 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.xcontent.ConstructingObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.upgrades.SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains the current state of system index migration progress. Used to resume runs if there's a node failure during migration. 
+ */ +public class SystemIndexMigrationTaskState implements PersistentTaskState { + private static final ParseField CURRENT_INDEX_FIELD = new ParseField("current_index"); + private static final ParseField CURRENT_FEATURE_FIELD = new ParseField("current_feature"); + private static final ParseField FEATURE_METADATA_MAP_FIELD = new ParseField("feature_metadata"); + + @SuppressWarnings(value = "unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + SYSTEM_INDEX_UPGRADE_TASK_NAME, + true, + args -> new SystemIndexMigrationTaskState((String) args[0], (String) args[1], (Map) args[2]) + ); + + static { + PARSER.declareString(constructorArg(), CURRENT_INDEX_FIELD); + PARSER.declareString(constructorArg(), CURRENT_FEATURE_FIELD); + PARSER.declareObject(constructorArg(), (p, c) -> p.map(), FEATURE_METADATA_MAP_FIELD); + } + + private final String currentIndex; + private final String currentFeature; + private final Map featureCallbackMetadata; + + public SystemIndexMigrationTaskState(String currentIndex, String currentFeature, Map featureCallbackMetadata) { + this.currentIndex = Objects.requireNonNull(currentIndex); + this.currentFeature = Objects.requireNonNull(currentFeature); + this.featureCallbackMetadata = Objects.requireNonNullElse(featureCallbackMetadata, new HashMap<>()); + } + + public SystemIndexMigrationTaskState(StreamInput in) throws IOException { + this.currentIndex = in.readString(); + this.currentFeature = in.readString(); + this.featureCallbackMetadata = in.readMap(); + } + + /** + * Gets the name of the index that's currently being migrated. + */ + public String getCurrentIndex() { + return currentIndex; + } + + /** + * Gets the name of the feature which owns the index that's currently being migrated. + */ + public String getCurrentFeature() { + return currentFeature; + } + + /** + * Retrieves metadata stored by the pre-upgrade hook, intended for consumption by the post-migration hook.
+ * See {@link org.elasticsearch.plugins.SystemIndexPlugin#prepareForIndicesMigration(ClusterService, Client, ActionListener)} and + * {@link org.elasticsearch.plugins.SystemIndexPlugin#indicesMigrationComplete(Map, ClusterService, Client, ActionListener)} for + * details on the pre- and post-migration hooks. + */ + public Map getFeatureCallbackMetadata() { + return featureCallbackMetadata; + } + + public static SystemIndexMigrationTaskState fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(CURRENT_INDEX_FIELD.getPreferredName(), currentIndex); + builder.field(CURRENT_FEATURE_FIELD.getPreferredName(), currentFeature); + builder.field(FEATURE_METADATA_MAP_FIELD.getPreferredName(), featureCallbackMetadata); + } + builder.endObject(); + return builder; + } + + @Override + public String getWriteableName() { + return SYSTEM_INDEX_UPGRADE_TASK_NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(currentIndex); + out.writeString(currentFeature); + out.writeMap(featureCallbackMetadata); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if ((o instanceof SystemIndexMigrationTaskState) == false) return false; + SystemIndexMigrationTaskState that = (SystemIndexMigrationTaskState) o; + return currentIndex.equals(that.currentIndex) + && currentFeature.equals(that.currentFeature) + && featureCallbackMetadata.equals(that.featureCallbackMetadata); + } + + @Override + public int hashCode() { + return Objects.hash(currentIndex, currentFeature, featureCallbackMetadata); + } +} diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java new file mode 100644 index 0000000000000..1164b0a0ba8ac ---
/dev/null +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -0,0 +1,610 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; +import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.ParentTaskAssigningClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.indices.SystemIndices; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.tasks.TaskId; + +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +import static org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION; +import static org.elasticsearch.cluster.metadata.IndexMetadata.State.CLOSE; + +/** + * This is where the logic to actually perform the migration lives - {@link SystemIndexMigrator#run(SystemIndexMigrationTaskState)} will + * be invoked when the migration process is started, plus any time the node running the migration drops from the cluster/crashes/etc. + * + * See {@link SystemIndexMigrationTaskState} for the data that's saved for node failover. 
+ */ +public class SystemIndexMigrator extends AllocatedPersistentTask { + private static final Logger logger = LogManager.getLogger(SystemIndexMigrator.class); + + private static final Version READY_FOR_MIGRATION_VERSION = Version.V_7_16_0; + + // Fixed properties & services + private final ParentTaskAssigningClient baseClient; + private final ClusterService clusterService; + private final SystemIndices systemIndices; + private final MetadataUpdateSettingsService metadataUpdateSettingsService; + private final MetadataCreateIndexService metadataCreateIndexService; + private final IndexScopedSettings indexScopedSettings; + + // In-memory state + // NOTE: This queue is not a thread-safe class. Use `synchronized (migrationQueue)` whenever you access this. I chose this rather than + // a synchronized/concurrent collection or an AtomicReference because we often need to do compound operations, which are much simpler + // with `synchronized` blocks than when only the collection accesses are protected. 
+ private final Queue migrationQueue = new LinkedList<>(); + private final AtomicReference> currentFeatureCallbackMetadata = new AtomicReference<>(); + + public SystemIndexMigrator( + Client client, + long id, + String type, + String action, + TaskId parentTask, + SystemIndexMigrationTaskParams params, + Map headers, + ClusterService clusterService, + SystemIndices systemIndices, + MetadataUpdateSettingsService metadataUpdateSettingsService, + MetadataCreateIndexService metadataCreateIndexService, + IndexScopedSettings indexScopedSettings + ) { + super(id, type, action, "system-index-migrator", parentTask, headers); + this.baseClient = new ParentTaskAssigningClient(client, parentTask); + this.clusterService = clusterService; + this.systemIndices = systemIndices; + this.metadataUpdateSettingsService = metadataUpdateSettingsService; + this.metadataCreateIndexService = metadataCreateIndexService; + this.indexScopedSettings = indexScopedSettings; + } + + public void run(SystemIndexMigrationTaskState taskState) { + ClusterState clusterState = clusterService.state(); + + final String stateIndexName; + final String stateFeatureName; + + if (taskState != null) { + currentFeatureCallbackMetadata.set(taskState.getFeatureCallbackMetadata()); + stateIndexName = taskState.getCurrentIndex(); + stateFeatureName = taskState.getCurrentFeature(); + + SystemIndices.Feature feature = systemIndices.getFeatures().get(stateFeatureName); + if (feature == null) { + markAsFailed( + new IllegalStateException( + "cannot migrate feature [" + stateFeatureName + "] because that feature is not installed on this node" + ) + ); + return; + } + + if (stateIndexName != null && clusterState.metadata().hasIndex(stateIndexName) == false) { + markAsFailed(new IndexNotFoundException(stateIndexName, "cannot migrate because that index does not exist")); + return; + } + } else { + stateIndexName = null; + stateFeatureName = null; + } + + synchronized (migrationQueue) { + if (migrationQueue.isEmpty() == false 
&& taskState == null) { + // A null task state means this is a new execution, not resumed from a failure + markAsFailed(new IllegalStateException("migration is already in progress, cannot start new migration")); + return; + } + + systemIndices.getFeatures() + .values() + .stream() + .flatMap(feature -> SystemIndexMigrationInfo.fromFeature(feature, clusterState.metadata(), indexScopedSettings)) + .filter(migrationInfo -> needsToBeMigrated(clusterState.metadata().index(migrationInfo.getCurrentIndexName()))) + .sorted() // Stable order between nodes + .collect(Collectors.toCollection(() -> migrationQueue)); + + List closedIndices = migrationQueue.stream() + .filter(SystemIndexMigrationInfo::isCurrentIndexClosed) + .map(SystemIndexMigrationInfo::getCurrentIndexName) + .collect(Collectors.toList()); + if (closedIndices.isEmpty() == false) { + markAsFailed( + new IllegalStateException( + new ParameterizedMessage("indices must be open to be migrated, but indices {} are closed", closedIndices) + .getFormattedMessage() + ) + ); + return; + } + + // The queue we just generated *should* be the same one as was generated on the last node, so the first entry in the queue + // should be the same as is in the task state + if (stateIndexName != null && stateFeatureName != null && migrationQueue.isEmpty() == false) { + SystemIndexMigrationInfo nextMigrationInfo = migrationQueue.peek(); + // This should never, ever happen in testing mode, but could conceivably happen if there are different sets of plugins + // installed on the previous node vs. this one. 
+ assert nextMigrationInfo.getFeatureName().equals(stateFeatureName) + && nextMigrationInfo.getCurrentIndexName().equals(stateIndexName) : "index name [" + + stateIndexName + + "] or feature name [" + + stateFeatureName + + "] from task state did not match first index [" + + nextMigrationInfo.getCurrentIndexName() + + "] and feature [" + + nextMigrationInfo.getFeatureName() + + "] of locally computed queue, see logs"; + if (nextMigrationInfo.getCurrentIndexName().equals(stateIndexName) == false) { + if (clusterState.metadata().hasIndex(stateIndexName) == false) { + // If we don't have that index at all, and also don't have the next one + markAsFailed( + new IllegalStateException( + new ParameterizedMessage( + "failed to resume system index migration from index [{}], that index is not present in the cluster", + stateIndexName + ).getFormattedMessage() + ) + ); + } + logger.warn( + new ParameterizedMessage( + "resuming system index migration with index [{}], which does not match index given in last task state [{}]", + nextMigrationInfo.getCurrentIndexName(), + stateIndexName + ) + ); + } + } + } + + // Kick off our callback "loop" - finishIndexAndLoop calls back into prepareNextIndex + cleanUpPreviousMigration( + taskState, + clusterState, + state -> prepareNextIndex(state, state2 -> migrateSingleIndex(state2, this::finishIndexAndLoop), stateFeatureName) + ); + } + + private void cleanUpPreviousMigration( + SystemIndexMigrationTaskState taskState, + ClusterState currentState, + Consumer listener + ) { + logger.debug("cleaning up previous migration, task state: [{}]", taskState == null ? 
"null" : Strings.toString(taskState)); + if (taskState != null && taskState.getCurrentIndex() != null) { + SystemIndexMigrationInfo migrationInfo; + try { + migrationInfo = SystemIndexMigrationInfo.fromTaskState( + taskState, + systemIndices, + currentState.metadata(), + indexScopedSettings + ); + } catch (Exception e) { + markAsFailed(e); + return; + } + final String newIndexName = migrationInfo.getNextIndexName(); + logger.info("removing index [{}] from previous incomplete migration", newIndexName); + + migrationInfo.createClient(baseClient) + .admin() + .indices() + .prepareDelete(newIndexName) + .execute(ActionListener.wrap(ackedResponse -> { + if (ackedResponse.isAcknowledged()) { + logger.debug("successfully removed index [{}]", newIndexName); + clearResults(clusterService, ActionListener.wrap(listener::accept, this::markAsFailed)); + } + }, this::markAsFailed)); + } else { + logger.debug("no incomplete index to remove"); + clearResults(clusterService, ActionListener.wrap(listener::accept, this::markAsFailed)); + } + } + + private void finishIndexAndLoop(BulkByScrollResponse bulkResponse) { + // The BulkByScroll response is validated in #migrateSingleIndex, it's just here to satisfy the ActionListener type + assert bulkResponse.isTimedOut() == false + && (bulkResponse.getBulkFailures() == null || bulkResponse.getBulkFailures().isEmpty()) + && (bulkResponse.getSearchFailures() == null + || bulkResponse.getSearchFailures() + .isEmpty()) : "If this assertion gets triggered it means the validation in migrateSingleIndex isn't working right"; + SystemIndexMigrationInfo lastMigrationInfo = currentMigrationInfo(); + logger.info( + "finished migrating old index [{}] from feature [{}] to new index [{}]", + lastMigrationInfo.getCurrentIndexName(), + lastMigrationInfo.getFeatureName(), + lastMigrationInfo.getNextIndexName() + ); + assert migrationQueue != null && migrationQueue.isEmpty() == false; + synchronized (migrationQueue) { + migrationQueue.remove(); + } + 
SystemIndexMigrationInfo nextMigrationInfo = currentMigrationInfo(); + if (nextMigrationInfo == null || nextMigrationInfo.getFeatureName().equals(lastMigrationInfo.getFeatureName()) == false) { + // The next feature name is different than the last one, so we just finished a feature - time to invoke its post-migration hook + lastMigrationInfo.indicesMigrationComplete( + currentFeatureCallbackMetadata.get(), + clusterService, + baseClient, + ActionListener.wrap(successful -> { + if (successful == false) { + // GWB> Should we actually fail in this case instead of plugging along? + logger.warn( + "post-migration hook for feature [{}] indicated failure; feature migration metadata prior to failure was [{}]", + lastMigrationInfo.getFeatureName(), + currentFeatureCallbackMetadata.get() + ); + } + recordIndexMigrationSuccess(lastMigrationInfo); + }, this::markAsFailed) + ); + } else { + prepareNextIndex( + clusterService.state(), + state2 -> migrateSingleIndex(state2, this::finishIndexAndLoop), + lastMigrationInfo.getFeatureName() + ); + } + } + + private void recordIndexMigrationSuccess(SystemIndexMigrationInfo lastMigrationInfo) { + MigrationResultsUpdateTask updateTask = MigrationResultsUpdateTask.upsert( + lastMigrationInfo.getFeatureName(), + SingleFeatureMigrationResult.success(), + ActionListener.wrap( + state -> { + prepareNextIndex( + state, + clusterState -> migrateSingleIndex(clusterState, this::finishIndexAndLoop), + lastMigrationInfo.getFeatureName() + ); + }, + this::markAsFailed + ) + ); + updateTask.submit(clusterService); + } + + private void prepareNextIndex(ClusterState clusterState, Consumer listener, String lastFeatureName) { + synchronized (migrationQueue) { + assert migrationQueue != null; + if (migrationQueue.isEmpty()) { + logger.info("finished migrating feature indices"); + markAsCompleted(); + return; + } + } + + final SystemIndexMigrationInfo migrationInfo = currentMigrationInfo(); + assert migrationInfo != null : "the queue of indices to migrate 
should have been checked for emptiness before calling this method"; + logger.info( + "preparing to migrate old index [{}] from feature [{}] to new index [{}]", + migrationInfo.getCurrentIndexName(), + migrationInfo.getFeatureName(), + migrationInfo.getNextIndexName() + ); + final AtomicReference> updatedTaskStateFeatureMetadata = new AtomicReference<>(); + if (migrationInfo.getFeatureName().equals(lastFeatureName) == false) { + // And then invoke the pre-migration hook for the next one. + migrationInfo.prepareForIndicesMigration( + clusterService, + baseClient, + ActionListener.wrap(updatedTaskStateFeatureMetadata::set, this::markAsFailed) + ); + } else { + // Otherwise, just re-use what we already have. + updatedTaskStateFeatureMetadata.set(currentFeatureCallbackMetadata.get()); + } + final SystemIndexMigrationTaskState newTaskState = new SystemIndexMigrationTaskState( + migrationInfo.getCurrentIndexName(), + migrationInfo.getFeatureName(), + updatedTaskStateFeatureMetadata.get() + ); + logger.debug("updating task state to [{}]", Strings.toString(newTaskState)); + currentFeatureCallbackMetadata.set(updatedTaskStateFeatureMetadata.get()); + updatePersistentTaskState(newTaskState, ActionListener.wrap(task -> { + assert newTaskState.equals(task.getState()) : "task state returned by update method did not match submitted task state"; + logger.debug("new task state [{}] accepted", Strings.toString(newTaskState)); + listener.accept(clusterState); + }, this::markAsFailed)); + } + + private boolean needsToBeMigrated(IndexMetadata indexMetadata) { + assert indexMetadata != null : "null IndexMetadata should be impossible, we're not consistently using the same cluster state"; + if (indexMetadata == null) { + return false; + } + return indexMetadata.isSystem() && indexMetadata.getCreationVersion().before(NO_UPGRADE_REQUIRED_VERSION); + } + + private void migrateSingleIndex(ClusterState clusterState, Consumer listener) { + final SystemIndexMigrationInfo migrationInfo = 
currentMigrationInfo(); + String oldIndexName = migrationInfo.getCurrentIndexName(); + final IndexMetadata imd = clusterState.metadata().index(oldIndexName); + if (imd.getState().equals(CLOSE)) { + logger.error( + "unable to migrate index [{}] from feature [{}] because it is closed", + oldIndexName, + migrationInfo.getFeatureName() + ); + markAsFailed(new IllegalStateException("unable to migrate index [" + oldIndexName + "] because it is closed")); + return; + } + Index oldIndex = imd.getIndex(); + String newIndexName = migrationInfo.getNextIndexName(); + logger.info("migrating index [{}] from feature [{}] to new index [{}]", oldIndexName, migrationInfo.getFeatureName(), newIndexName); + ActionListener innerListener = ActionListener.wrap(listener::accept, this::markAsFailed); + try { + Exception versionException = checkNodeVersionsReadyForMigration(clusterState); + if (versionException != null) { + markAsFailed(versionException); + return; + } + createIndex(migrationInfo, ActionListener.wrap(shardsAcknowledgedResponse -> { + logger.debug( + "while migrating [{}] , got create index response: [{}]", + oldIndexName, + Strings.toString(shardsAcknowledgedResponse) + ); + setWriteBlock( + oldIndex, + true, + ActionListener.wrap(setReadOnlyResponse -> reindex(migrationInfo, ActionListener.wrap(bulkByScrollResponse -> { + logger.debug( + "while migrating [{}], got reindex response: [{}]", + oldIndexName, + Strings.toString(bulkByScrollResponse) + ); + if ((bulkByScrollResponse.getBulkFailures() != null && bulkByScrollResponse.getBulkFailures().isEmpty() == false) + || (bulkByScrollResponse.getSearchFailures() != null + && bulkByScrollResponse.getSearchFailures().isEmpty() == false)) { + removeReadOnlyBlockOnReindexFailure( + oldIndex, + innerListener, + logAndThrowExceptionForFailures(bulkByScrollResponse) + ); + } else { + // Successful completion of reindexing - remove read only and delete old index + setWriteBlock( + oldIndex, + false, + ActionListener.wrap( + 
setAliasAndRemoveOldIndex(migrationInfo, bulkByScrollResponse, innerListener), + innerListener::onFailure + ) + ); + } + }, e -> { + logger.error( + new ParameterizedMessage( + "error occurred while reindexing index [{}] from feature [{}] to destination index [{}]", + oldIndexName, + migrationInfo.getFeatureName(), + newIndexName + ), + e + ); + removeReadOnlyBlockOnReindexFailure(oldIndex, innerListener, e); + })), innerListener::onFailure) + ); + }, innerListener::onFailure)); + } catch (Exception ex) { + logger.error( + new ParameterizedMessage( + "error occurred while migrating index [{}] from feature [{}] to new index [{}]", + oldIndexName, + migrationInfo.getFeatureName(), + newIndexName + ), + ex + ); + removeReadOnlyBlockOnReindexFailure(oldIndex, innerListener, ex); + innerListener.onFailure(ex); + } + } + + private void createIndex(SystemIndexMigrationInfo migrationInfo, ActionListener listener) { + final CreateIndexClusterStateUpdateRequest createRequest = new CreateIndexClusterStateUpdateRequest( + "migrate-system-index", + migrationInfo.getNextIndexName(), + migrationInfo.getNextIndexName() + ); + + createRequest.waitForActiveShards(ActiveShardCount.ALL) + .mappings(migrationInfo.getMappings()) + .settings(Objects.requireNonNullElse(migrationInfo.getSettings(), Settings.EMPTY)); + metadataCreateIndexService.createIndex(createRequest, listener); + } + + private CheckedConsumer setAliasAndRemoveOldIndex( + SystemIndexMigrationInfo migrationInfo, + BulkByScrollResponse bulkByScrollResponse, + ActionListener listener + ) { + final IndicesAliasesRequestBuilder aliasesRequest = migrationInfo.createClient(baseClient).admin().indices().prepareAliases(); + aliasesRequest.removeIndex(migrationInfo.getCurrentIndexName()); + aliasesRequest.addAlias(migrationInfo.getNextIndexName(), migrationInfo.getCurrentIndexName()); + + // Copy all the aliases from the old index + IndexMetadata imd = clusterService.state().metadata().index(migrationInfo.getCurrentIndexName()); 
+ imd.getAliases().values().forEach(aliasToAdd -> { + aliasesRequest.addAliasAction( + IndicesAliasesRequest.AliasActions.add() + .index(migrationInfo.getNextIndexName()) + .alias(aliasToAdd.alias()) + .indexRouting(aliasToAdd.indexRouting()) + .searchRouting(aliasToAdd.searchRouting()) + .filter(aliasToAdd.filter() == null ? null : aliasToAdd.filter().string()) + .writeIndex(null) + ); + }); + + // Technically this callback might have a different cluster state, but it shouldn't matter - these indices shouldn't be changing + // while we're trying to migrate them. + return unsetReadOnlyResponse -> aliasesRequest.execute( + ActionListener.wrap(deleteIndexResponse -> listener.onResponse(bulkByScrollResponse), listener::onFailure) + ); + } + + /** + * Makes the index readonly if it's not set as a readonly yet + */ + private void setWriteBlock(Index index, boolean readOnlyValue, ActionListener listener) { + final Settings readOnlySettings = Settings.builder().put(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey(), readOnlyValue).build(); + UpdateSettingsClusterStateUpdateRequest updateSettingsRequest = new UpdateSettingsClusterStateUpdateRequest().indices( + new Index[] { index } + ).settings(readOnlySettings).setPreserveExisting(false); + + metadataUpdateSettingsService.updateSettings(updateSettingsRequest, listener); + } + + private void reindex(SystemIndexMigrationInfo migrationInfo, ActionListener listener) { + ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(migrationInfo.getCurrentIndexName()); + reindexRequest.setDestIndex(migrationInfo.getNextIndexName()); + reindexRequest.setRefresh(true); + migrationInfo.createClient(baseClient).execute(ReindexAction.INSTANCE, reindexRequest, listener); + } + + // Failure handlers + private void removeReadOnlyBlockOnReindexFailure(Index index, ActionListener listener, Exception ex) { + logger.info("removing read only block on [{}] because reindex failed [{}]", index, ex); + 
setWriteBlock(index, false, ActionListener.wrap(unsetReadOnlyResponse -> listener.onFailure(ex), e1 -> listener.onFailure(ex))); + } + + private ElasticsearchException logAndThrowExceptionForFailures(BulkByScrollResponse bulkByScrollResponse) { + String bulkFailures = (bulkByScrollResponse.getBulkFailures() != null) + ? Strings.collectionToCommaDelimitedString(bulkByScrollResponse.getBulkFailures()) + : ""; + String searchFailures = (bulkByScrollResponse.getSearchFailures() != null) + ? Strings.collectionToCommaDelimitedString(bulkByScrollResponse.getSearchFailures()) + : ""; + logger.error("error occurred while reindexing, bulk failures [{}], search failures [{}]", bulkFailures, searchFailures); + return new ElasticsearchException( + "error occurred while reindexing, bulk failures [{}], search failures [{}]", + bulkFailures, + searchFailures + ); + } + + /** + * Clears the migration queue, marks the task as failed, and saves the exception for later retrieval. + * + * This method is **ASYNC**, so if you're not using it as a listener, be sure to {@code return} after calling this. 
+ */ + @Override + public void markAsFailed(Exception e) { + SystemIndexMigrationInfo migrationInfo = currentMigrationInfo(); + synchronized (migrationQueue) { + migrationQueue.clear(); + } + String featureName = Optional.ofNullable(migrationInfo).map(SystemIndexMigrationInfo::getFeatureName).orElse(""); + String indexName = Optional.ofNullable(migrationInfo).map(SystemIndexMigrationInfo::getCurrentIndexName).orElse(""); + + MigrationResultsUpdateTask.upsert( + featureName, + SingleFeatureMigrationResult.failure(indexName, e), + ActionListener.wrap(state -> super.markAsFailed(e), exception -> super.markAsFailed(e)) + ).submit(clusterService); + super.markAsFailed(e); + } + + private static Exception checkNodeVersionsReadyForMigration(ClusterState state) { + final Version minNodeVersion = state.nodes().getMinNodeVersion(); + if (minNodeVersion.before(READY_FOR_MIGRATION_VERSION)) { + return new IllegalStateException( + "all nodes must be on version [" + + READY_FOR_MIGRATION_VERSION + + "] or later to migrate feature indices but lowest node version currently in cluster is [" + + minNodeVersion + + "]" + ); + } + return null; + } + + /** + * Creates a task that will clear the results of previous migration attempts. + * @param clusterService The cluster service. + * @param listener A listener that will be called upon successfully updating the cluster state. 
+ */ + private static void clearResults(ClusterService clusterService, ActionListener listener) { + clusterService.submitStateUpdateTask("clear migration results", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + if (currentState.metadata().custom(FeatureMigrationResults.TYPE) != null) { + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentState.metadata()).removeCustom(FeatureMigrationResults.TYPE)) + .build(); + } + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(newState); + } + + @Override + public void onFailure(String source, Exception e) { + logger.error("failed to clear migration results when starting new migration", e); + listener.onFailure(e); + } + }); + logger.debug("submitted update task to clear migration results"); + } + + private SystemIndexMigrationInfo currentMigrationInfo() { + synchronized (migrationQueue) { + return migrationQueue.peek(); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java index 6888aa4168e48..c4881612edbcc 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponseTests.java @@ -15,6 +15,9 @@ import java.io.IOException; import java.util.Collections; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.ERROR; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.IN_PROGRESS; +import static 
org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED; import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -55,19 +58,44 @@ public void testConstructorHandlesNullLists() { assertThat(response.getFeatureUpgradeStatuses(), equalTo(Collections.emptyList())); } + public void testUpgradeStatusCominations() { + assertEquals(NO_MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, NO_MIGRATION_NEEDED)); + + assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, MIGRATION_NEEDED)); + assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, NO_MIGRATION_NEEDED)); + assertEquals(MIGRATION_NEEDED, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, MIGRATION_NEEDED)); + + + assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, NO_MIGRATION_NEEDED)); + assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, IN_PROGRESS)); + assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, IN_PROGRESS)); + assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, MIGRATION_NEEDED)); + assertEquals(IN_PROGRESS, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, IN_PROGRESS)); + + + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, NO_MIGRATION_NEEDED)); + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(NO_MIGRATION_NEEDED, ERROR)); + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(MIGRATION_NEEDED, ERROR)); + assertEquals(ERROR, 
GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, MIGRATION_NEEDED)); + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(IN_PROGRESS, ERROR)); + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, IN_PROGRESS)); + assertEquals(ERROR, GetFeatureUpgradeStatusResponse.UpgradeStatus.combine(ERROR, ERROR)); + } + private static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus createFeatureStatus() { return new GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus( randomAlphaOfLengthBetween(3, 20), randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), randomFrom(org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.values()), - randomList(4, GetFeatureUpgradeStatusResponseTests::getIndexVersion) + randomList(4, GetFeatureUpgradeStatusResponseTests::getIndexInfo) ); } - private static GetFeatureUpgradeStatusResponse.IndexVersion getIndexVersion() { - return new GetFeatureUpgradeStatusResponse.IndexVersion( + private static GetFeatureUpgradeStatusResponse.IndexInfo getIndexInfo() { + return new GetFeatureUpgradeStatusResponse.IndexInfo( randomAlphaOfLengthBetween(3, 20), - randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()) + randomFrom(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()), + null ); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java index 26118a7c73091..db2b2f3d09a62 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusActionTests.java @@ -20,9 +20,8 @@ import java.util.ArrayList; import java.util.List; 
-import java.util.Map; -import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.NO_MIGRATION_NEEDED; +import static org.elasticsearch.action.admin.cluster.migration.GetFeatureUpgradeStatusResponse.UpgradeStatus.MIGRATION_NEEDED; import static org.hamcrest.Matchers.equalTo; public class TransportGetFeatureUpgradeStatusActionTests extends ESTestCase { @@ -35,27 +34,27 @@ public void testGetFeatureStatus() { GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus status = TransportGetFeatureUpgradeStatusAction.getFeatureUpgradeStatus( CLUSTER_STATE, - Map.entry("test-feature", FEATURE)); + FEATURE); - assertThat(status.getUpgradeStatus(), equalTo(NO_MIGRATION_NEEDED)); + assertThat(status.getUpgradeStatus(), equalTo(MIGRATION_NEEDED)); assertThat(status.getFeatureName(), equalTo("test-feature")); assertThat(status.getMinimumIndexVersion(), equalTo(Version.V_7_0_0)); assertThat(status.getIndexVersions().size(), equalTo(2)); // additional testing below } - public void testGetIndexVersion() { - List versions = - TransportGetFeatureUpgradeStatusAction.getIndexVersions(CLUSTER_STATE, FEATURE); + public void testGetIndexInfos() { + List versions = + TransportGetFeatureUpgradeStatusAction.getIndexInfos(CLUSTER_STATE, FEATURE); assertThat(versions.size(), equalTo(2)); { - GetFeatureUpgradeStatusResponse.IndexVersion version = versions.get(0); + GetFeatureUpgradeStatusResponse.IndexInfo version = versions.get(0); assertThat(version.getVersion(), equalTo(Version.CURRENT)); assertThat(version.getIndexName(), equalTo(".test-index-1")); } { - GetFeatureUpgradeStatusResponse.IndexVersion version = versions.get(1); + GetFeatureUpgradeStatusResponse.IndexInfo version = versions.get(1); assertThat(version.getVersion(), equalTo(Version.V_7_0_0)); assertThat(version.getIndexName(), equalTo(".test-index-2")); } diff --git a/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java 
b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java new file mode 100644 index 0000000000000..542c4c39a2b97 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.AbstractDiffableSerializationTestCase; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +public class FeatureMigrationResultsTests extends AbstractDiffableSerializationTestCase { + + @Override + protected FeatureMigrationResults createTestInstance() { + return new FeatureMigrationResults(randomMap(0, 10, () -> new Tuple<>(randomAlphaOfLength(5), randomFeatureStatus()))); + } + + private SingleFeatureMigrationResult randomFeatureStatus() { + if (randomBoolean()) { + return SingleFeatureMigrationResult.success(); + } else { + return SingleFeatureMigrationResult.failure( + randomAlphaOfLength(8), + new RuntimeException(randomAlphaOfLength(5)) + ); + } + } + + @Override + protected FeatureMigrationResults mutateInstance(Metadata.Custom instance) { + int oldSize = ((FeatureMigrationResults) instance).getFeatureStatuses().size(); + if (oldSize == 0 || randomBoolean()) { + return new FeatureMigrationResults( + randomMap(oldSize + 1, oldSize + 5, () -> new Tuple<>(randomAlphaOfLength(5), randomFeatureStatus())) + ); + } else { + return new FeatureMigrationResults(randomMap(0, 
oldSize, () -> new Tuple<>(randomAlphaOfLength(5), randomFeatureStatus()))); + } + } + + /** + * Disable assertions of XContent equivalence - the exception prevents this from working as it translates everything + * into ElasticsearchException. + */ + @Override protected boolean assertToXContentEquivalence() { + return false; + } + + @Override + protected Writeable.Reader instanceReader() { + return FeatureMigrationResults::new; + } + + @Override protected FeatureMigrationResults doParseInstance(XContentParser parser) throws IOException { + return FeatureMigrationResults.fromXContent(parser); + } + + @Override protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + return mutateInstance(testInstance); + } + + @Override protected Writeable.Reader> diffReader() { + return FeatureMigrationResults.ResultsDiff::new; + } +} diff --git a/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParamsTests.java b/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParamsTests.java new file mode 100644 index 0000000000000..1fa9cb905dc5c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskParamsTests.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class SystemIndexMigrationTaskParamsTests extends AbstractNamedWriteableTestCase { + + // NOTE: This test case does not currently implement mutateInstance, because all instances of the class + // are equal and have the same hashcode (for now). + + @Override + protected SystemIndexMigrationTaskParams createTestInstance() { + return new SystemIndexMigrationTaskParams(); + } + + @Override + protected SystemIndexMigrationTaskParams copyInstance(SystemIndexMigrationTaskParams instance, Version version) throws IOException { + return new SystemIndexMigrationTaskParams(); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Collections.singletonList( + new NamedWriteableRegistry.Entry( + SystemIndexMigrationTaskParams.class, + SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME, + SystemIndexMigrationTaskParams::new + ) + ) + ); + } + + @Override + protected Class categoryClass() { + return SystemIndexMigrationTaskParams.class; + } +} diff --git a/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskStateTests.java b/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskStateTests.java new file mode 100644 index 0000000000000..7b8a75ce77588 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/upgrades/SystemIndexMigrationTaskStateTests.java @@ -0,0 +1,103 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.AbstractNamedWriteableTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +public class SystemIndexMigrationTaskStateTests extends AbstractNamedWriteableTestCase { + + @Override + protected SystemIndexMigrationTaskState createTestInstance() { + return new SystemIndexMigrationTaskState( + randomAlphaOfLengthBetween(5, 20), + randomAlphaOfLengthBetween(5, 20), + randomFeatureCallbackMetadata() + ); + } + + private Map randomFeatureCallbackMetadata() { + return randomMap(0, 10, () -> new Tuple<>(randomAlphaOfLength(5), randomMetadataValue())); + } + + private Object randomMetadataValue() { + switch (randomIntBetween(0, 7)) { + case 0: + return randomMap(0, 3, () -> new Tuple<>(randomAlphaOfLength(5), randomMetadataValue())); + case 1: + return randomList(0, 3, this::randomMetadataValue); + case 2: + return randomLong(); + case 3: + return randomShort(); + case 4: + return randomBoolean(); + case 5: + return randomFloat(); + case 6: + return randomDouble(); + case 7: + return randomAlphaOfLengthBetween(5, 10); + } + throw new AssertionError("bad randomization"); + } + + @Override + protected SystemIndexMigrationTaskState copyInstance(SystemIndexMigrationTaskState instance, Version version) throws IOException { + return new SystemIndexMigrationTaskState( + instance.getCurrentIndex(), + instance.getCurrentFeature(), + instance.getFeatureCallbackMetadata() + ); + } + + @Override protected SystemIndexMigrationTaskState mutateInstance(SystemIndexMigrationTaskState instance) throws IOException { 
+ String index = instance.getCurrentIndex(); + String feature = instance.getCurrentFeature(); + Map featureMetadata = instance.getFeatureCallbackMetadata(); + switch (randomIntBetween(0, 2)) { + case 0: + index = randomValueOtherThan(instance.getCurrentIndex(), () -> randomAlphaOfLengthBetween(5, 20)); + break; + case 1: + feature = randomValueOtherThan(instance.getCurrentFeature(), () -> randomAlphaOfLengthBetween(5, 20)); + break; + case 2: + featureMetadata = randomValueOtherThan(instance.getFeatureCallbackMetadata(), this::randomFeatureCallbackMetadata); + break; + default: + assert false : "invalid randomization case"; + } + return new SystemIndexMigrationTaskState(index, feature, featureMetadata); + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry( + Collections.singletonList( + new NamedWriteableRegistry.Entry( + SystemIndexMigrationTaskState.class, + SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME, + SystemIndexMigrationTaskState::new + ) + ) + ); + } + + @Override + protected Class categoryClass() { + return SystemIndexMigrationTaskState.class; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index d97cbaa5b5124..acc387d42bf97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -36,7 +36,11 @@ public final class SystemPrivilege extends Privilege { RetentionLeaseActions.Remove.ACTION_NAME + "*", // needed for CCR to remove retention leases RetentionLeaseActions.Renew.ACTION_NAME + "*", // needed for CCR to renew retention leases "indices:admin/settings/update", // needed for 
DiskThresholdMonitor.markIndicesReadOnly - CompletionPersistentTaskAction.NAME // needed for ShardFollowTaskCleaner + CompletionPersistentTaskAction.NAME, // needed for ShardFollowTaskCleaner + "indices:data/write/*", // needed for SystemIndexMigrator + "indices:data/read/*", // needed for SystemIndexMigrator + "indices:admin/refresh", // needed for SystemIndexMigrator + "indices:admin/aliases" // needed for SystemIndexMigrator ); private static final Predicate PREDICATE = (action) -> { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 354bcc97d6fa6..b79dbb5a4a1c2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -10,11 +10,11 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.GetEnrichPolicyAction; import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; -import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authz.permission.ClusterPermission; import org.elasticsearch.xpack.core.security.support.Automatons; @@ -155,7 +155,7 @@ public void testSystem() throws Exception { assertThat(predicate.test("cluster:admin/whatever"), is(false)); assertThat(predicate.test("indices:admin/mapping/put"), is(true)); 
assertThat(predicate.test("indices:admin/mapping/whatever"), is(false)); - assertThat(predicate.test("internal:transport/proxy/indices:data/read/query"), is(false)); + assertThat(predicate.test("internal:transport/proxy/indices:data/read/query"), is(true)); assertThat(predicate.test("internal:transport/proxy/indices:monitor/whatever"), is(true)); assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync"), is(true)); assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[p]"), is(true));