diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 5a11f8b02bf84..f012b1b27a7aa 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -30,6 +30,8 @@ dependencies { compile 'com.google.guava:guava:20.0' compile 'org.apache.commons:commons-lang3:3.4' testCompile project(':test:fixtures:azure-fixture') + // required by the test for the encrypted Azure repository + testCompile project(path: ':x-pack:plugin:repository-encrypted', configuration: 'testArtifacts') } dependencyLicenses { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index b23693fd268d4..3adbf5858c348 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -74,17 +74,20 @@ protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { @Override protected Settings nodeSettings(int nodeOrdinal) { + final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + httpServerUrl(); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(AzureStorageSettings.ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) + .setSecureSettings(nodeSecureSettings(nodeOrdinal)) + .build(); + } + + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { + MockSecureSettings secureSettings = new MockSecureSettings(); final String key = Base64.getEncoder().encodeToString(randomAlphaOfLength(10).getBytes(StandardCharsets.UTF_8)); - final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("test").getKey(), "account"); secureSettings.setString(AzureStorageSettings.KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), key); - - final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + httpServerUrl(); - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(AzureStorageSettings.ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) - .setSecureSettings(secureSettings) - .build(); + return secureSettings; } /** diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/EncryptedAzureBlobStoreRepositoryIntegTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/EncryptedAzureBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..51ec89292bd2b --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/EncryptedAzureBlobStoreRepositoryIntegTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.encrypted.DecryptionPacketsInputStream; +import org.elasticsearch.repositories.encrypted.EncryptedRepository; +import org.elasticsearch.repositories.encrypted.EncryptedRepositoryPlugin; +import org.elasticsearch.repositories.encrypted.LocalStateEncryptedRepositoryPlugin; +import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class EncryptedAzureBlobStoreRepositoryIntegTests extends AzureBlobStoreRepositoryTests { + + private static List<String> repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List<String> names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .build(); + } + + @Override + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { + MockSecureSettings secureSettings = super.nodeSecureSettings(nodeOrdinal); + for (String repositoryName : repositoryNames) { + secureSettings.setString(EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.
+ getConcreteSettingForNamespace(repositoryName).getKey(), "password" + repositoryName); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + protected long blobLengthFromDiskLength(BlobMetaData blobMetaData) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobMetaData.name())) { + // index.latest is not encrypted, hence the size on disk is equal to the content length + return blobMetaData.length(); + } else { + return DecryptionPacketsInputStream.getDecryptionLength(blobMetaData.length() - + EncryptedRepository.MetadataIdentifier.byteLength(), EncryptedRepository.PACKET_LENGTH_IN_BYTES); + } + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestAzureRepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put(super.repositorySettings()); + settings.put(EncryptedRepositoryPlugin.DELEGATE_TYPE.getKey(), AzureRepository.TYPE); + if (ESTestCase.randomBoolean()) { + long size = 1 << ESTestCase.randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); + } +} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index bd20c36922297..c3a39c0696549 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -54,6 +54,8 @@ dependencies { compile 'com.google.apis:google-api-services-storage:v1-rev20191011-1.30.3' testCompile project(':test:fixtures:gcs-fixture') + // required by the test for the encrypted GCS repository + testCompile project(path: ':x-pack:plugin:repository-encrypted', configuration: 'testArtifacts') } dependencyLicenses { diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle index aaa5a4ddc553b..94e4ccd22c562 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -34,6 +34,9 @@ apply plugin: 'elasticsearch.test.fixtures' // TODO think about flattening qa:google-cloud-storage project into parent dependencies { testCompile project(path: ':plugins:repository-gcs') + // required by the third-party test for the encrypted GCS repository + testCompile project(path: ':x-pack:plugin:repository-encrypted') + testCompile project(path: ':x-pack:plugin:core') } testFixtures.useFixture(':test:fixtures:gcs-fixture') @@ -101,6 +104,7 @@ task thirdPartyTest(type: Test) { } include '**/GoogleCloudStorageThirdPartyTests.class' + include '**/EncryptedGoogleCloudStorageThirdPartyTests.class' systemProperty 'tests.security.manager', false systemProperty 'test.google.bucket', gcsBucket nonInputProperties.systemProperty 'test.google.base', gcsBasePath + "_third_party_tests_" + BuildParams.testSeed @@ -126,10 +130,14 @@ processTestResources { integTest { dependsOn project(':plugins:repository-gcs').bundlePlugin + dependsOn project(':x-pack:plugin:core').bundlePlugin + dependsOn project(':x-pack:plugin:repository-encrypted').bundlePlugin } testClusters.integTest { plugin file(project(':plugins:repository-gcs').bundlePlugin.archiveFile) + plugin file(project(':x-pack:plugin:core').bundlePlugin.archiveFile) + plugin
file(project(':x-pack:plugin:repository-encrypted').bundlePlugin.archiveFile) keystore 'gcs.client.integration_test.credentials_file', serviceAccountFile, IGNORE_VALUE diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGoogleCloudStorageThirdPartyTests.java new file mode 100644 index 0000000000000..011c5d7061928 --- /dev/null +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGoogleCloudStorageThirdPartyTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.encrypted.EncryptedRepository; +import org.elasticsearch.repositories.encrypted.EncryptedRepositoryPlugin; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class EncryptedGoogleCloudStorageThirdPartyTests extends GoogleCloudStorageThirdPartyTests { + + @Override + protected Collection<Class<? extends Plugin>> getPlugins() { + return pluginList(XPackPlugin.class, EncryptedRepositoryPlugin.class, GoogleCloudStoragePlugin.class); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put("xpack.license.self_generated.type", "trial") + .build(); + } + + @Override + protected SecureSettings credentials() { + MockSecureSettings secureSettings = (MockSecureSettings) super.credentials(); + secureSettings.setString(EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.
+ getConcreteSettingForNamespace("test-encrypted-repo").getKey(), "password-test-repo"); + return secureSettings; + } + + @Override + protected void createRepository(final String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster() + .preparePutRepository(repoName) + .setType("encrypted") + .setSettings(Settings.builder() + .put("delegate_type", "gcs") + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "") + + "/" + EncryptedGoogleCloudStorageThirdPartyTests.class.getName() ) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } + + @Override + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { + // TODO cleanup of root blobs does not count the encryption metadata blobs, but the cleanup of blob containers ("indices" folder) + // does count them; ideally there should be consistency, one way or the other + assertThat(response.result().blobs(), equalTo(1L + 2L + 1L /* one metadata blob */)); + // the cleanup stats of the encrypted repository currently include only some of the metadata blobs (as per above), which are + // themselves cumbersome to size; but the bytes count is stable + assertThat(response.result().bytes(), equalTo(244L)); + } + + @Override + protected void assertBlobsByPrefix(BlobPath path, String prefix, Map<String, BlobMetaData> blobs) throws Exception { + // blobs are larger after encryption + Map<String, BlobMetaData> blobsWithSizeAfterEncryption = new HashMap<>(); + blobs.forEach((name, meta) -> { + blobsWithSizeAfterEncryption.put(name, new BlobMetaData() { + @Override + public String name() { + return meta.name(); + } + + @Override + public long length() { + return EncryptedRepository.getEncryptedBlobByteLength(meta.length()); + } + }); + }); + super.assertBlobsByPrefix(path, prefix, blobsWithSizeAfterEncryption); + } + + @Override + protected String getTestRepoName() { + return "test-encrypted-repo"; + } + +} diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index 0096198791e39..03b477dadbf1a 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -69,11 +69,11 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) .setType("gcs") .setSettings(Settings.builder() .put("bucket", System.getProperty("test.google.bucket")) - .put("base_path", System.getProperty("test.google.base", "/")) + .put("base_path", System.getProperty("test.google.base", "") + "/" + GoogleCloudStorageThirdPartyTests.class.getName() ) ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGCSBlobStoreRepositoryIntegTests.java
b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGCSBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..103f79b0b5f8e --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/EncryptedGCSBlobStoreRepositoryIntegTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.encrypted.DecryptionPacketsInputStream; +import org.elasticsearch.repositories.encrypted.EncryptedRepository; +import org.elasticsearch.repositories.encrypted.EncryptedRepositoryPlugin; +import org.elasticsearch.repositories.encrypted.LocalStateEncryptedRepositoryPlugin; +import org.elasticsearch.test.ESTestCase; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class EncryptedGCSBlobStoreRepositoryIntegTests extends GoogleCloudStorageBlobStoreRepositoryTests { + + private static List<String> repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List<String> names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .build(); + } + + @Override + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { + MockSecureSettings secureSettings = super.nodeSecureSettings(nodeOrdinal); + for (String repositoryName : repositoryNames) { + secureSettings.setString(EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.
+ getConcreteSettingForNamespace(repositoryName).getKey(), "password" + repositoryName); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + protected long blobLengthFromDiskLength(BlobMetaData blobMetaData) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobMetaData.name())) { + // index.latest is not encrypted, hence the size on disk is equal to the content length + return blobMetaData.length(); + } else { + return DecryptionPacketsInputStream.getDecryptionLength(blobMetaData.length() - + EncryptedRepository.MetadataIdentifier.byteLength(), EncryptedRepository.PACKET_LENGTH_IN_BYTES); + } + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestGoogleCloudStoragePlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put(super.repositorySettings()); + settings.put(EncryptedRepositoryPlugin.DELEGATE_TYPE.getKey(), GoogleCloudStorageRepository.TYPE); + if (ESTestCase.randomBoolean()) { + long size = 1 << ESTestCase.randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); + } + +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 24ed35e550491..031c822884d79 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -103,20 +103,23 @@ protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { @Override protected Settings nodeSettings(int nodeOrdinal) { - final Settings.Builder settings = Settings.builder(); - settings.put(super.nodeSettings(nodeOrdinal)); - settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()); - settings.put(TOKEN_URI_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl() + "/token"); + final Settings.Builder settings = Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()) + .put(TOKEN_URI_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl() + "/token") + .setSecureSettings(nodeSecureSettings(nodeOrdinal)); + return settings.build(); + } + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { final MockSecureSettings secureSettings = new MockSecureSettings(); final byte[] serviceAccount = TestUtils.createServiceAccount(random()); secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace("test").getKey(), serviceAccount); - settings.setSecureSettings(secureSettings); - return settings.build(); + return secureSettings; } public void testDeleteSingleItem() { - final String repoName = createRepository(randomName()); + final String repoName = createRepository(randomRepositoryName()); final RepositoriesService repositoriesService =
internalCluster().getMasterNodeInstance(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repoName); PlainActionFuture.get(f -> repository.threadPool().generic().execute(ActionRunnable.run(f, () -> diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index a92f07b71ee01..fc418178cd346 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -53,6 +53,8 @@ dependencies { compile 'javax.xml.bind:jaxb-api:2.2.2' testCompile project(':test:fixtures:s3-fixture') + // required by the test for the encrypted S3 repository + testCompile project(path: ':x-pack:plugin:repository-encrypted', configuration: 'testArtifacts') } dependencyLicenses { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/EncryptedS3BlobStoreRepositoryIntegTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/EncryptedS3BlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..0da596fa8d4ca --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/EncryptedS3BlobStoreRepositoryIntegTests.java @@ -0,0 +1,110 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.encrypted.DecryptionPacketsInputStream; +import org.elasticsearch.repositories.encrypted.EncryptedRepository; +import org.elasticsearch.repositories.encrypted.EncryptedRepositoryPlugin; +import org.elasticsearch.repositories.encrypted.LocalStateEncryptedRepositoryPlugin; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class EncryptedS3BlobStoreRepositoryIntegTests extends S3BlobStoreRepositoryTests { + + private static List<String> repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List<String> names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .build(); + } + + @Override + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { + MockSecureSettings secureSettings = super.nodeSecureSettings(nodeOrdinal); + for (String repositoryName : repositoryNames) { + secureSettings.setString(EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.
+ getConcreteSettingForNamespace(repositoryName).getKey(), "password" + repositoryName); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + protected long blobLengthFromDiskLength(BlobMetaData blobMetaData) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobMetaData.name())) { + // index.latest is not encrypted, hence the size on disk is equal to the content length + return blobMetaData.length(); + } else { + return DecryptionPacketsInputStream.getDecryptionLength(blobMetaData.length() - + EncryptedRepository.MetadataIdentifier.byteLength(), EncryptedRepository.PACKET_LENGTH_IN_BYTES); + } + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class, TestS3RepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put(super.repositorySettings()); + settings.put(EncryptedRepositoryPlugin.DELEGATE_TYPE.getKey(), S3Repository.TYPE); + return settings.build(); + } + + @Override + public void testEnforcedCooldownPeriod() { + // this test is not applicable for the encrypted repository because it verifies behavior which pertains to snapshots that must + // have been created before the encrypted repository was introduced, hence no such encrypted snapshots can possibly exist + } + +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index c28f6fbb66e7b..f8ae1ec594eca 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -118,10 +118,6 @@ protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { @Override protected Settings nodeSettings(int nodeOrdinal) { - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "access"); - secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "secret"); - final Settings.Builder builder = Settings.builder() .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that verify an exact wait time .put(S3ClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace("test").getKey(), httpServerUrl()) @@ -130,7 +126,7 @@ protected Settings nodeSettings(int nodeOrdinal) { // Disable request throttling because some random values in tests might generate too many failures for the S3 client .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(super.nodeSettings(nodeOrdinal)) - .setSecureSettings(secureSettings); + .setSecureSettings(nodeSecureSettings(nodeOrdinal)); if (signerOverride != null) { builder.put(S3ClientSettings.SIGNER_OVERRIDE.getConcreteSettingForNamespace("test").getKey(), signerOverride); @@ -141,8 +137,15 @@ protected Settings nodeSettings(int nodeOrdinal) { return builder.build(); } + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { +
MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "access"); + secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), "secret"); + return secureSettings; + } + public void testEnforcedCooldownPeriod() throws IOException { - final String repoName = createRepository(randomName(), Settings.builder().put(repositorySettings()) + final String repoName = createRepository(randomRepositoryName(), Settings.builder().put(repositorySettings()) .put(S3Repository.COOLDOWN_PERIOD.getKey(), TEST_COOLDOWN_PERIOD).build()); final SnapshotId fakeOldSnapshot = client().admin().cluster().prepareCreateSnapshot(repoName, "snapshot-old") diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index d3acd02a06d1f..b107eb4c9010a 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -25,6 +25,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Objects; /** * The list of paths where a blob can reside. The contents of the paths are dependent upon the implementation of {@link BlobContainer}. @@ -62,6 +63,12 @@ public BlobPath add(String path) { return new BlobPath(Collections.unmodifiableList(paths)); } + public BlobPath append(BlobPath otherPath) { + List<String> paths = new ArrayList<>(this.paths); + paths.addAll(otherPath.paths); + return new BlobPath(Collections.unmodifiableList(paths)); + } + public String buildAsString() { String p = String.join(SEPARATOR, paths); if (p.isEmpty() || p.endsWith(SEPARATOR)) { @@ -92,4 +99,17 @@ public String toString() { } return sb.toString(); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BlobPath other = (BlobPath) o; + return Objects.equals(paths, other.paths); + } + + @Override + public int hashCode() { + return Objects.hash(paths); + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index fdb70cffb133f..7f191a1005933 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -981,7 +981,7 @@ public IndexMetaData getSnapshotIndexMetaData(final SnapshotId snapshotId, final } } - private BlobPath indicesPath() { + protected BlobPath indicesPath() { return basePath().add("indices"); } @@ -1066,7 +1066,7 @@ public void endVerification(String seed) { // Tracks the latest known repository generation in a best-effort way to detect inconsistent listing of root level index-N blobs // and concurrent modifications.
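// (the tracker below is kept protected, rather than private, so that repository subclasses such as the encrypted repository can consult it)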
- private final AtomicLong latestKnownRepoGen = new AtomicLong(RepositoryData.UNKNOWN_REPO_GEN); + protected final AtomicLong latestKnownRepoGen = new AtomicLong(RepositoryData.UNKNOWN_REPO_GEN); // Best effort cache of the latest known repository data and its generation, cached serialized as compressed json private final AtomicReference<Tuple<Long, BytesReference>> latestKnownRepositoryData = new AtomicReference<>(); @@ -1400,14 +1400,8 @@ public void onFailure(Exception e) { writeAtomic(indexBlob, BytesReference.bytes(filteredRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), writeShardGens)), true); // write the current generation to the index-latest file - final BytesReference genBytes; - try (BytesStreamOutput bStream = new BytesStreamOutput()) { - bStream.writeLong(newGen); - genBytes = bStream.bytes(); - } logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen); - - writeAtomic(INDEX_LATEST_BLOB, genBytes, false); + writeSnapshotIndexLatestBlob(newGen); // Step 3: Update CS to reflect new repository generation. clusterService.submitStateUpdateTask("set safe repository generation [" + metadata.name() + "][" + newGen + "]", @@ -1499,11 +1493,21 @@ long latestIndexBlobId() throws IOException { } } - // package private for testing - long readSnapshotIndexLatestBlob() throws IOException { + // protected for tests and to allow subclasses to override + protected long readSnapshotIndexLatestBlob() throws IOException { return Numbers.bytesToLong(Streams.readFully(blobContainer().readBlob(INDEX_LATEST_BLOB)).toBytesRef()); } + // protected to allow subclasses to override + protected void writeSnapshotIndexLatestBlob(long newGen) throws IOException { + final BytesReference genBytes; + try (BytesStreamOutput bStream = new BytesStreamOutput()) { + bStream.writeLong(newGen); + genBytes = bStream.bytes(); + } + writeAtomic(INDEX_LATEST_BLOB, genBytes, false); + } + private long listBlobsToGetLatestIndexId() throws IOException { return latestGeneration(blobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX).keySet()); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index efc0e653edf5c..e632b4d2a2b6c 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -69,14 +69,14 @@ protected Settings nodeSettings() { @Override public void setUp() throws Exception { super.setUp(); - createRepository("test-repo"); + createRepository(getTestRepoName()); deleteAndAssertEmpty(getRepository().basePath()); } @Override public void tearDown() throws Exception { deleteAndAssertEmpty(getRepository().basePath()); - client().admin().cluster().prepareDeleteRepository("test-repo").get(); + client().admin().cluster().prepareDeleteRepository(getTestRepoName()).get(); super.tearDown(); } @@ -112,7 +112,7 @@ public void testCreateSnapshot() { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client().admin() .cluster() - .prepareCreateSnapshot("test-repo", snapshotName) + .prepareCreateSnapshot(getTestRepoName(), snapshotName) .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-3") .get(); @@ -122,17 +122,17 @@ public void testCreateSnapshot() { assertThat(client().admin() .cluster() - .prepareGetSnapshots("test-repo") +
.prepareGetSnapshots(getTestRepoName()) .setSnapshots(snapshotName) .get() - .getSnapshots("test-repo") + .getSnapshots(getTestRepoName()) .get(0) .state(), equalTo(SnapshotState.SUCCESS)); assertTrue(client().admin() .cluster() - .prepareDeleteSnapshot("test-repo", snapshotName) + .prepareDeleteSnapshot(getTestRepoName(), snapshotName) .get() .isAcknowledged()); } @@ -188,7 +188,7 @@ public void testCleanup() throws Exception { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client().admin() .cluster() - .prepareCreateSnapshot("test-repo", snapshotName) + .prepareCreateSnapshot(getTestRepoName(), snapshotName) .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-3") .get(); @@ -198,16 +198,16 @@ public void testCleanup() throws Exception { assertThat(client().admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(getTestRepoName()) .setSnapshots(snapshotName) .get() - .getSnapshots("test-repo") + .getSnapshots(getTestRepoName()) .get(0) .state(), equalTo(SnapshotState.SUCCESS)); final BlobStoreRepository repo = - (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); + (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository(getTestRepoName()); final Executor genericExec = repo.threadPool().executor(ThreadPool.Names.GENERIC); logger.info("--> creating a dangling index folder"); @@ -215,7 +215,7 @@ createDanglingIndex(repo, genericExec); logger.info("--> deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest("test-repo", snapshotName)).actionGet(); + client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest(getTestRepoName(), snapshotName)).actionGet(); assertConsistentRepository(repo, genericExec); @@ -223,7 +223,7 @@ createDanglingIndex(repo, genericExec); logger.info("--> Execute repository cleanup"); - final CleanupRepositoryResponse response = client().admin().cluster().prepareCleanupRepository("test-repo").get(); + final CleanupRepositoryResponse response = client().admin().cluster().prepareCleanupRepository(getTestRepoName()).get(); assertCleanupResponse(response, 3L, 1L); } @@ -285,6 +285,10 @@ private Set<String> listChildren(BlobPath path) { } protected BlobStoreRepository getRepository() { - return (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); + return (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository(getTestRepoName()); + } + + protected String getTestRepoName() { + return "test-repo"; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java index 54ea0f26dcd0a..f35f6e8dfb8e9 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java @@ -52,7 +52,6 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.threadpool.ThreadPool; -import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; @@ -116,8 +115,8 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex executor.execute(ActionRunnable.run(listener, () -> {
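// the latest generation is read through the repository (readSnapshotIndexLatestBlob) rather than by parsing the blob directly, so that repository implementations which store index.latest differently, e.g. the encrypted repository, remain verifiable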
final BlobContainer blobContainer = repository.blobContainer(); final long latestGen; - try (DataInputStream inputStream = new DataInputStream(blobContainer.readBlob("index.latest"))) { - latestGen = inputStream.readLong(); + try { + latestGen = repository.readSnapshotIndexLatestBlob(); } catch (NoSuchFileException e) { throw new AssertionError("Could not find index.latest blob for repo [" + repository + "]"); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 6ae627972b62c..b776c749f1fb5 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -175,7 +175,7 @@ public void testList() throws IOException { BlobMetaData blobMetaData = blobs.get(generated.getKey()); assertThat(generated.getKey(), blobMetaData, CoreMatchers.notNullValue()); assertThat(blobMetaData.name(), CoreMatchers.equalTo(generated.getKey())); - assertThat(blobMetaData.length(), CoreMatchers.equalTo(generated.getValue())); + assertThat(blobLengthFromDiskLength(blobMetaData), CoreMatchers.equalTo(generated.getValue())); } assertThat(container.listBlobsByPrefix("foo-").size(), CoreMatchers.equalTo(numberOfFooBlobs)); @@ -185,6 +185,10 @@ } } + protected long blobLengthFromDiskLength(BlobMetaData blobMetaData) { + return blobMetaData.length(); + } + public void testDeleteBlobs() throws IOException { try (BlobStore store = newBlobStore()) { final List<String> blobNames = Arrays.asList("foobar", "barfoo"); @@ -262,7 +266,7 @@ protected static void writeBlob(BlobContainer container, String blobName, BytesA } protected BlobStore newBlobStore() { - final String repository = createRepository(randomName()); + final String repository = createRepository(randomRepositoryName()); final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) internalCluster().getMasterNodeInstance(RepositoriesService.class).repository(repository); return PlainActionFuture.get( @@ -270,7 +274,7 @@ } public void testSnapshotAndRestore() throws Exception { - final String repoName = createRepository(randomName()); + final String repoName = createRepository(randomRepositoryName()); int indexCount = randomIntBetween(1, 5); int[] docCounts = new int[indexCount]; String[] indexNames = generateRandomNames(indexCount); @@ -341,7 +345,7 @@ public void testSnapshotAndRestore() throws Exception { } public void testMultipleSnapshotAndRollback() throws Exception { - final String repoName = createRepository(randomName()); + final String repoName = createRepository(randomRepositoryName()); int iterationCount = randomIntBetween(2, 5); int[] docCounts = new int[iterationCount]; String indexName = randomName(); @@ -396,7 +400,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { } public void testIndicesDeletedFromRepository() throws Exception { - final String repoName = createRepository("test-repo"); + final String repoName = createRepository(randomRepositoryName()); Client client = client(); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); ensureGreen(); @@ -493,7 +497,11 @@ private static void assertSuccessfulRestore(RestoreSnapshotResponse response) {
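// a restore is considered successful only when every shard has been restored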
assertThat(response.getRestoreInfo().successfulShards(), equalTo(response.getRestoreInfo().totalShards())); } - protected static String randomName() { + protected String randomName() { + return randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + } + + protected String randomRepositoryName() { return randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index ce36027b81379..25822df60da88 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -32,8 +32,11 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.mocksocket.MockHttpServer; import org.elasticsearch.test.BackgroundIndexer; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -46,6 +49,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -71,6 +77,7 @@ protected interface BlobStoreHttpHandler extends HttpHandler { private static final byte[] BUFFER = new byte[1024]; private static HttpServer httpServer; + private static ExecutorService executorService; private Map<String, HttpHandler> handlers; private static final Logger log = LogManager.getLogger(); @@ -78,13 +85,18 @@ protected interface BlobStoreHttpHandler extends HttpHandler { @BeforeClass public static void startHttpServer() throws Exception { httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + ThreadFactory threadFactory = EsExecutors.daemonThreadFactory("[" + ESMockAPIBasedRepositoryIntegTestCase.class.getName() + "]"); + executorService = EsExecutors.newScaling(ESMockAPIBasedRepositoryIntegTestCase.class.getName(), 0, 2, 60, TimeUnit.SECONDS, + threadFactory, new ThreadContext(Settings.EMPTY)); httpServer.setExecutor(r -> { - try { - r.run(); - } catch (Throwable t) { - log.error("Error in execution on mock http server IO thread", t); - throw t; - } + executorService.execute(() -> { + try { + r.run(); + } catch (Throwable t) { + log.error("Error in execution on mock http server IO thread", t); + throw t; + } + }); }); httpServer.start(); } @@ -98,6 +110,7 @@ public void setUpHttpServer() { @AfterClass public static void stopHttpServer() { httpServer.stop(0); + ThreadPool.terminate(executorService, 10, TimeUnit.SECONDS); httpServer = null; } @@ -123,7 +136,7 @@ public void tearDownHttpServer() { * Test the snapshot and restore of an index which has large segments files.
*/ public final void testSnapshotWithLargeSegmentFiles() throws Exception { - final String repository = createRepository(randomName()); + final String repository = createRepository(randomRepositoryName()); final String index = "index-no-merges"; createIndex(index, Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 1d90ef9488789..906104b9436b6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -510,6 +510,19 @@ public boolean isWatcherAllowed() { return isAllowedByLicense(OperationMode.STANDARD); } + /** + * Determines if creating an encrypted snapshot is allowed. Note that restoring an encrypted snapshot is not conditioned upon the + * license operation mode (i.e. it's free for all). + */ + public synchronized boolean isEncryptedSnapshotAllowed() { + return isAllowedByLicense(OperationMode.PLATINUM); + } + + /** + * Monitoring is always available as long as there is a valid license + * + * @return true if the license is active + */ public boolean isMonitoringAllowed() { return allowForAllLicenses(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java index ce1345d4941a8..2f143d7ad0e63 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/XPackLicenseStateTests.java @@ -350,6 +350,22 @@ public void testWatcherInactivePlatinumGoldTrial() throws Exception { assertAllowed(STANDARD, false, XPackLicenseState::isWatcherAllowed, false); } + public void testEncryptedSnapshotsWithInactiveLicense() { + assertAllowed(BASIC, false, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(TRIAL, false, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(GOLD, false, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(PLATINUM, false, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(STANDARD, false, XPackLicenseState::isEncryptedSnapshotAllowed, false); + } + + public void testEncryptedSnapshotsWithActiveLicense() { + assertAllowed(BASIC, true, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(TRIAL, true, XPackLicenseState::isEncryptedSnapshotAllowed, true); + assertAllowed(GOLD, true, XPackLicenseState::isEncryptedSnapshotAllowed, false); + assertAllowed(PLATINUM, true, XPackLicenseState::isEncryptedSnapshotAllowed, true); + assertAllowed(STANDARD, true, XPackLicenseState::isEncryptedSnapshotAllowed, false); + } + public void testGraphPlatinumTrial() throws Exception { assertAllowed(TRIAL, true, XPackLicenseState::isGraphAllowed, true); assertAllowed(PLATINUM, true, XPackLicenseState::isGraphAllowed, true); diff --git a/x-pack/plugin/repository-encrypted/build.gradle b/x-pack/plugin/repository-encrypted/build.gradle index 5a2f82946f711..bef629206701a 100644 --- a/x-pack/plugin/repository-encrypted/build.gradle +++ b/x-pack/plugin/repository-encrypted/build.gradle @@ -8,4 +8,25 @@ esplugin { extendedPlugins = ['x-pack-core'] } +dependencies { + // necessary for the license check + compileOnly project(path: xpackModule('core'), 
configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + integTest.enabled = false + +// test jar is exported by the testArtifacts configuration to be used in the cloud repositories (S3, GCS, Azure) tests +configurations { + testArtifacts.extendsFrom testRuntime +} + +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output +} + +artifacts { + archives jar + testArtifacts testJar +} diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/BlobEncryptionMetadata.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/BlobEncryptionMetadata.java new file mode 100644 index 0000000000000..25ee44d71e7aa --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/BlobEncryptionMetadata.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.CheckedBiFunction; + +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Arrays; +import java.util.Objects; + +/** + * Holds the necessary, and sufficient, metadata required to decrypt the associated encrypted blob. + * The data encryption key (DEK {@link #dataEncryptionKey}) is the most important part of the metadata; + * it must be kept secret (i.e. MUST be stored encrypted). + * The metadata does not hold an explicit link to the associated encrypted blob. It's the responsibility of the creator + * ({@link EncryptedRepository}) to maintain this association. 
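+ * The serialized form (see {@link #serializeMetadata}) is an 8-byte authenticated header, holding the nonce and the + * packet length as little-endian integers, followed by the encrypted bytes of the data encryption key.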
+ */ +public final class BlobEncryptionMetadata { + + // this is part of the Initialization Vectors of the encrypted data blobs + // although the IVs of the encrypted data blobs are stored unencrypted within the ciphertext, + // storing it in the metadata as well is a simpler way to verify the association without + // attempting the decryption (even without using this software, because the {@link #nonce} is the + // first 4-byte integer (little endian) of both the metadata and the associated encrypted blob) + private final int nonce; + // the packet length from {@link EncryptionPacketsInputStream} + private final int packetLengthInBytes; + // the key used to encrypt and decrypt the associated blob + private final SecretKey dataEncryptionKey; + + public BlobEncryptionMetadata(int nonce, int packetLengthInBytes, SecretKey dataEncryptionKey) { + this.nonce = nonce; + this.packetLengthInBytes = packetLengthInBytes; + this.dataEncryptionKey = Objects.requireNonNull(dataEncryptionKey); + } + + public int getNonce() { + return nonce; + } + + public int getPacketLengthInBytes() { + return packetLengthInBytes; + } + + public SecretKey getDataEncryptionKey() { + return dataEncryptionKey; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BlobEncryptionMetadata metadata = (BlobEncryptionMetadata) o; + return nonce == metadata.nonce && + packetLengthInBytes == metadata.packetLengthInBytes && + Objects.equals(dataEncryptionKey, metadata.dataEncryptionKey); + } + + @Override + public int hashCode() { + int result = Objects.hash(nonce, packetLengthInBytes); + result = 31 * result + Objects.hashCode(dataEncryptionKey); + return result; + } + + static byte[] serializeMetadata(BlobEncryptionMetadata metadata, CheckedBiFunction<byte[], byte[], byte[], Exception> encryptor) + throws IOException { + ByteBuffer byteBuffer = ByteBuffer.allocate(2 * Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN); + byteBuffer.putInt(0, metadata.getNonce()); + byteBuffer.putInt(Integer.BYTES, metadata.getPacketLengthInBytes()); + byte[] authenticatedData = byteBuffer.array(); + final byte[] encryptedData; + try { + encryptedData = encryptor.apply(metadata.getDataEncryptionKey().getEncoded(), authenticatedData); + } catch (Exception e) { + throw new IOException("Failure to encrypt metadata", e); + } + byte[] result = new byte[authenticatedData.length + encryptedData.length]; + System.arraycopy(authenticatedData, 0, result, 0, authenticatedData.length); + System.arraycopy(encryptedData, 0, result, authenticatedData.length, encryptedData.length); + return result; + } + + static BlobEncryptionMetadata deserializeMetadata(byte[] metadata, CheckedBiFunction<byte[], byte[], byte[], Exception> decryptor) + throws IOException { + byte[] authenticatedData = Arrays.copyOf(metadata, 2 * Integer.BYTES); + ByteBuffer byteBuffer = ByteBuffer.wrap(authenticatedData).order(ByteOrder.LITTLE_ENDIAN); + int nonce = byteBuffer.getInt(0); + int packetLengthInBytes = byteBuffer.getInt(Integer.BYTES); + byte[] encryptedData = Arrays.copyOfRange(metadata, 2 * Integer.BYTES, metadata.length); + final byte[] decryptedData; + try { + decryptedData = decryptor.apply(encryptedData, authenticatedData); + } catch (Exception e) { + throw new IOException("Failure to decrypt metadata", e); + } + SecretKey dataDecryptionKey = new SecretKeySpec(decryptedData, 0, decryptedData.length, "AES"); + return new BlobEncryptionMetadata(nonce, packetLengthInBytes, dataDecryptionKey); + } +} diff --git
a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java index ff4b4ec8dc18b..0ee406950ffc6 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/ChainingInputStream.java @@ -5,13 +5,16 @@ */ package org.elasticsearch.repositories.encrypted; -import java.io.IOException; -import java.io.InputStream; -import java.util.Objects; - import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.SequenceInputStream; +import java.util.Objects; /** * A {@code ChainingInputStream} concatenates multiple component input streams into a @@ -72,6 +75,74 @@ public abstract class ChainingInputStream extends InputStream { */ private boolean closed; + /** + * Returns a new {@link ChainingInputStream} that concatenates the bytes to be read from the first + * input stream with the bytes from the second input stream. The stream arguments must support + * the {@code mark} and {@code reset} operations; otherwise use {@link SequenceInputStream}. + * + * @param first the input stream supplying the first bytes of the returned {@link ChainingInputStream} + * @param second the input stream supplying the bytes after the {@code first} input stream has been exhausted + */ + public static InputStream chain(InputStream first, InputStream second) { + if (false == Objects.requireNonNull(first).markSupported()) { + throw new IllegalArgumentException("The first component input stream does not support mark"); + } + if (false == Objects.requireNonNull(second).markSupported()) { + throw new IllegalArgumentException("The second component input stream does not support mark"); + } + final InputStream firstComponent = new FilterInputStream(first) { + @Override + public void close() { + // silence close + // "first" can be reused, and the {@code ChainingInputStream} eagerly closes components after every use + // "first" is closed when the returned {@code ChainingInputStream} is closed + } + }; + final InputStream secondComponent = new FilterInputStream(second) { + @Override + public void close() { + // silence close + // "second" can be reused, and the {@code ChainingInputStream} eagerly closes components after every use + // "second" is closed when the returned {@code ChainingInputStream} is closed + } + }; + // be sure to remember the start of components because they might be reused + firstComponent.mark(Integer.MAX_VALUE); + secondComponent.mark(Integer.MAX_VALUE); + + return new ChainingInputStream() { + + @Override + InputStream nextComponent(InputStream currentComponentIn) throws IOException { + if (currentComponentIn == null) { + // when returning the next component, start from its beginning + firstComponent.reset(); + return firstComponent; + } else if (currentComponentIn == firstComponent) { + // when returning the next component, start from its beginning + secondComponent.reset(); + return secondComponent; + } else if (currentComponentIn == secondComponent) { + return null; + } else { + throw new IllegalStateException("Unexpected component input stream"); + } + } 
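+ + // note that per-component close() is silenced by the wrappers above, so the override below is the + // only place where the original "first" and "second" streams are actually released, via IOUtils.close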
+ + @Override + public void close() throws IOException { + Exception superException = null; + try { + super.close(); + } catch (Exception e) { + superException = e; + } finally { + IOUtils.close(superException, first, second); + } + } + }; + } + /** * This method is responsible for generating the component input streams. * It is passed the current input stream and must return the successive one, diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java index 8e8ebc7d9253d..2d77a21020d22 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/DecryptionPacketsInputStream.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.repositories.encrypted; +import org.elasticsearch.core.internal.io.IOUtils; + import javax.crypto.BadPaddingException; import javax.crypto.Cipher; import javax.crypto.IllegalBlockSizeException; @@ -123,6 +125,18 @@ public void reset() throws IOException { throw new IOException("Mark/reset not supported"); } + @Override + public void close() throws IOException { + Exception superException = null; + try { + super.close(); + } catch (IOException e) { + superException = e; + } finally { + IOUtils.close(superException, source); + } + } + private int decrypt(PrefixInputStream packetInputStream) throws IOException { // read only the IV prefix into the packet buffer int ivLength = packetInputStream.readNBytes(packetBuffer, 0, GCM_IV_LENGTH_IN_BYTES); @@ -163,7 +177,7 @@ private int decrypt(PrefixInputStream packetInputStream) throws IOException { private Cipher getPacketDecryptionCipher(byte[] packet) throws IOException { GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(GCM_TAG_LENGTH_IN_BYTES * Byte.SIZE, packet, 0, GCM_IV_LENGTH_IN_BYTES); try { - Cipher packetCipher = Cipher.getInstance(EncryptedRepository.GCM_ENCRYPTION_SCHEME); + Cipher packetCipher = Cipher.getInstance(EncryptedRepository.DATA_ENCRYPTION_SCHEME); packetCipher.init(Cipher.DECRYPT_MODE, secretKey, gcmParameterSpec); return packetCipher; } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException e) { diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java index 4a6b2fa801f08..e577c110116a0 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepository.java @@ -3,14 +3,847 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
*/ - package org.elasticsearch.repositories.encrypted; -public class EncryptedRepository { +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.StepListener; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoryCleanupResult; +import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.ShardGenerations; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.threadpool.ThreadPool; + +import javax.crypto.AEADBadTagException; +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.charset.StandardCharsets; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.Executor; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; + +public final class EncryptedRepository extends BlobStoreRepository { + static final Logger logger = LogManager.getLogger(EncryptedRepository.class); + // the following constants are fixed by definition static final int GCM_TAG_LENGTH_IN_BYTES = 16; static final int GCM_IV_LENGTH_IN_BYTES = 12; - static final int AES_BLOCK_SIZE_IN_BYTES = 128; - static final String GCM_ENCRYPTION_SCHEME = "AES/GCM/NoPadding"; + static final int AES_BLOCK_LENGTH_IN_BYTES = 128; + // changing the following constants implies breaking compatibility with previous versions of encrypted snapshots + // in this case the {@link #CURRENT_ENCRYPTION_VERSION_NUMBER} 
MUST be incremented
+    static final String DATA_ENCRYPTION_SCHEME = "AES/GCM/NoPadding";
+    static final int DATA_KEY_LENGTH_IN_BITS = 256;
     static final long PACKET_START_COUNTER = Long.MIN_VALUE;
-    static final int MAX_PACKET_LENGTH_IN_BYTES = 1 << 30;
+    static final int MAX_PACKET_LENGTH_IN_BYTES = 8 << 20; // 8MB
+    static final int METADATA_UID_LENGTH_IN_BYTES = 9; // the length of the random tag part of the metadata blob name
+    static final int METADATA_UID_LENGTH_IN_CHARS = 12; // base64 encoding with no padding
+    // this can be changed freely (can be made a repository parameter) without adjusting
+    // the {@link #CURRENT_ENCRYPTION_VERSION_NUMBER}, as long as it stays under the value
+    // of {@link #MAX_PACKET_LENGTH_IN_BYTES}
+    public static final int PACKET_LENGTH_IN_BYTES = 64 * (1 << 10); // 64KB - "public" because used in tests
+    static final String SALTED_PASSWORD_HASH_ALGO = "PBKDF2WithHmacSHA512";
+    static final int SALTED_PASSWORD_HASH_ITER_COUNT = 10000;
+    static final int SALTED_PASSWORD_HASH_KEY_LENGTH_IN_BITS = 512;
+    static final int PASSWORD_HASH_SALT_LENGTH_IN_BYTES = 16;
+
+    // each snapshot metadata contains the salted password hash of the master node that started the snapshot operation
+    // this hash is then verified on each data node before the actual shard files snapshot, as well as on the
+    // master node that finalizes the snapshot (could be a different master node, if a master failover
+    // has occurred in the meantime)
+    private static final String PASSWORD_HASH_RESERVED_USER_METADATA_KEY = EncryptedRepository.class.getName() + ".saltedPasswordHash";
+    // The encryption scheme version number to which the current implementation conforms.
+    // The version number MUST be incremented whenever the format of the metadata, or
+    // the way the metadata is used for the actual decryption, is changed.
+    // Incrementing the version number signals that previous implementations cannot make sense
+    // of the new scheme, so they will fail all operations on the repository.
+    private static final int CURRENT_ENCRYPTION_VERSION_NUMBER = 2; // nobody trusts v1 of anything
+    // the path of the blob container holding the encryption metadata
+    // this is relative to the root path holding the encrypted blobs (i.e. the repository root path)
+    private static final String ENCRYPTION_METADATA_ROOT = "encryption-metadata-v" + CURRENT_ENCRYPTION_VERSION_NUMBER;
+
+    // this is the repository instance to which all blob reads and writes are forwarded
+    private final BlobStoreRepository delegatedRepository;
+    // every data blob is encrypted with its own randomly generated AES key (this is the "Data Encryption Key")
+    private final Supplier<SecretKey> dataEncryptionKeySupplier;
+    // the {@link PasswordBasedEncryption} is used to encrypt (and decrypt) the data encryption key and the other associated metadata
+    // the metadata encryption is based on AES keys which are generated from the repository password
+    private final PasswordBasedEncryption metadataEncryption;
+    // Data blob encryption requires a "nonce" only if the SAME data encryption key is used for several data blobs.
+    // Because data encryption keys are generated randomly (see {@link #dataEncryptionKeySupplier}) the nonce in this case can be a constant value.
+    // But it is not a constant, for reasons of greater robustness and to allow the encryption IV (which is part of the ciphertext)
+    // to be inspected for ACCIDENTAL tampering without attempting decryption
+    private final Supplier<Integer> encryptionNonceSupplier;
+    // the metadata is stored in a separate blob so that when the metadata is regenerated (for example, re-encrypting it after the
+    // repository password is changed) it does not incur updates to the encrypted blob, but only the creation of a new metadata blob.
+    // However, a fixed-length identifier is prepended to the encrypted blob's contents, and it is used to locate the corresponding
+    // metadata. This identifier is static for a given encrypted blob, i.e. it will not change when the metadata is recreated.
+    private final Supplier<MetadataIdentifier> metadataIdentifierSupplier;
+    private final Supplier<XPackLicenseState> licenseStateSupplier;
+    // the salted hash of this repository's password on the local node. The password is fixed for the lifetime of the repository.
+    private final String repositoryPasswordSaltedHash;
+    // this is used to check that the salted hash of the repository password on the node that started the snapshot matches up with the
+    // repository password on the local node
+    private final HashVerifier passwordHashVerifier;
+
+    /**
+     * Returns the byte length (i.e. the storage size) of an encrypted blob, given the length of the blob's plaintext contents.
+     *
+     * @see EncryptionPacketsInputStream#getEncryptionLength(long, int)
+     */
+    public static long getEncryptedBlobByteLength(long plaintextBlobByteLength) {
+        return ((long) MetadataIdentifier.byteLength()) + EncryptionPacketsInputStream.getEncryptionLength(plaintextBlobByteLength,
+            PACKET_LENGTH_IN_BYTES);
+    }
+
+    protected EncryptedRepository(RepositoryMetaData metadata, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService,
+                                  BlobStoreRepository delegatedRepository, Supplier<XPackLicenseState> licenseStateSupplier,
+                                  char[] password) throws NoSuchAlgorithmException {
+        super(metadata, namedXContentRegistry, clusterService, BlobPath.cleanPath());
+        this.delegatedRepository = delegatedRepository;
+        KeyGenerator dataEncryptionKeyGenerator = KeyGenerator.getInstance(EncryptedRepositoryPlugin.CIPHER_ALGO);
+        dataEncryptionKeyGenerator.init(DATA_KEY_LENGTH_IN_BITS, SecureRandom.getInstance(EncryptedRepositoryPlugin.RAND_ALGO));
+        this.dataEncryptionKeySupplier = () -> dataEncryptionKeyGenerator.generateKey();
+        this.metadataEncryption = new PasswordBasedEncryption(password, SecureRandom.getInstance(EncryptedRepositoryPlugin.RAND_ALGO));
+        // data encryption uses random "nonce"s, although currently a constant "nonce" would be just as secure (because the data
+        // encryption key is randomly generated, using a {@code SecureRandom}, so there is no risk of reusing the same key with the same IV)
+        // don't use a {@code SecureRandom} though, it would be an unnecessary entropy drain
+        this.encryptionNonceSupplier = () -> Randomness.get().nextInt();
+        // the metadata used to decrypt the encrypted blob resides in a different blob, one for each encrypted data blob
+        // the metadata blob name is formed from the encrypted data blob name by appending a random tag and the repository generation,
+        // so as to make the metadata blob name unique in the unusual cases of data blob overwrite
+        this.metadataIdentifierSupplier = () -> {
+            byte[] randomMetadataNameTag = new byte[METADATA_UID_LENGTH_IN_BYTES];
+            Randomness.get().nextBytes(randomMetadataNameTag);
+            return new MetadataIdentifier(randomMetadataNameTag, latestKnownRepoGen.get());
+        };
+        this.licenseStateSupplier = licenseStateSupplier;
+        // the salted password hash for this encrypted repository password, on the local node (this is constant)
+        this.repositoryPasswordSaltedHash = computeSaltedPBKDF2Hash(SecureRandom.getInstance(EncryptedRepositoryPlugin.RAND_ALGO),
+            password);
+        // used to verify that the salted password hash in the snapshot metadata matches up with the repository password on the local node
+        this.passwordHashVerifier = new HashVerifier(password);
+    }
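The supplier above fixes the metadata blob naming scheme that the MetadataIdentifier class below implements: dataBlobName + "." + base64url(9 random bytes, no padding) + "." + repositoryGeneration. A minimal, self-contained sketch of forming and parsing such a name; the blob name and generation here are made-up values:

```java
import java.util.Base64;
import java.util.Random;

// Illustrative values only; the production code draws the 9 random bytes from
// Randomness.get() and the generation from latestKnownRepoGen.
public class MetadataNameSketch {
    public static void main(String[] args) {
        byte[] id = new byte[9]; // METADATA_UID_LENGTH_IN_BYTES
        new Random(42).nextBytes(id);
        // 9 bytes encode to exactly 12 base64url chars, no padding (METADATA_UID_LENGTH_IN_CHARS);
        // the base64url alphabet never contains '.', so the two separators stay unambiguous
        String tag = Base64.getUrlEncoder().withoutPadding().encodeToString(id);
        String metadataBlobName = "index-0" + "." + tag + "." + 17L;
        // parse it back, mirroring parseFromMetadataBlobName
        int generationPos = metadataBlobName.lastIndexOf('.');
        long generation = Long.parseLong(metadataBlobName.substring(generationPos + 1));
        int idPos = metadataBlobName.lastIndexOf('.', generationPos - 1);
        String dataBlobName = metadataBlobName.substring(0, idPos);
        System.out.println(dataBlobName + " @ generation " + generation); // index-0 @ generation 17
    }
}
```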
+    public static class MetadataIdentifier {
+
+        final byte[] id;
+        final long repositoryGeneration;
+
+        MetadataIdentifier(byte[] id, long repositoryGeneration) {
+            if (Objects.requireNonNull(id).length != METADATA_UID_LENGTH_IN_BYTES) {
+                throw new IllegalStateException("invalid metadata id");
+            }
+            this.id = id;
+            this.repositoryGeneration = repositoryGeneration;
+        }
+
+        byte[] asByteArray() {
+            ByteBuffer byteBuffer = ByteBuffer.allocate(byteLength()).order(ByteOrder.LITTLE_ENDIAN);
+            byteBuffer.put(id);
+            byteBuffer.putLong(METADATA_UID_LENGTH_IN_BYTES, repositoryGeneration);
+            return byteBuffer.array();
+        }
+
+        String asString() {
+            StringBuilder sb = new StringBuilder();
+            sb.append('.');
+            sb.append(new String(Base64.getUrlEncoder().withoutPadding().encode(id), StandardCharsets.UTF_8));
+            sb.append('.');
+            sb.append(repositoryGeneration);
+            return sb.toString();
+        }
+
+        public static int byteLength() {
+            return METADATA_UID_LENGTH_IN_BYTES + Long.BYTES;
+        }
+
+        static String formMetadataBlobName(String blobName, MetadataIdentifier metaId) {
+            return blobName + metaId.asString();
+        }
+
+        static Tuple<String, MetadataIdentifier> parseFromMetadataBlobName(String metadataBlobName) {
+            int generationPos = metadataBlobName.lastIndexOf('.');
+            if (generationPos <= 0 || generationPos == metadataBlobName.length() - 1) {
+                throw new IllegalArgumentException("Unrecognized metadata blob name");
+            }
+            long generation = Long.parseLong(metadataBlobName.substring(generationPos + 1));
+            int idPos = metadataBlobName.lastIndexOf('.', generationPos - 1);
+            if (idPos <= 0 || generationPos - idPos != METADATA_UID_LENGTH_IN_CHARS + 1) {
+                throw new IllegalArgumentException("Unrecognized metadata blob name");
+            }
+            byte[] id = Base64.getUrlDecoder().decode(metadataBlobName.substring(idPos + 1, generationPos));
+            MetadataIdentifier metaId = new MetadataIdentifier(id, generation);
+            return new Tuple<>(metadataBlobName.substring(0, idPos), metaId);
+        }
+
+        static MetadataIdentifier fromByteArray(byte[] idAsByteArray) {
+            if (Objects.requireNonNull(idAsByteArray).length != byteLength()) {
+                throw new IllegalArgumentException("Unrecognized metadata blob name");
+            }
+            ByteBuffer byteBuffer = ByteBuffer.wrap(idAsByteArray).order(ByteOrder.LITTLE_ENDIAN);
+            byte[] id = new byte[METADATA_UID_LENGTH_IN_BYTES];
+            byteBuffer.get(id);
+            long generation = byteBuffer.getLong(METADATA_UID_LENGTH_IN_BYTES);
+            return new MetadataIdentifier(id, generation);
+        }
+    }
+
+    /**
+     * The repository hook method which populates the snapshot metadata with the salted password hash of the repository on the (master)
+     * node that starts the snapshot operation. All the other actions associated with the same snapshot operation will first verify
+     * that the local repository password matches the hash from the snapshot metadata.
+     *
+ * In addition, if the installed license does not comply with encrypted snapshots, this throws an exception, which aborts the snapshot + * operation. + * + * See {@link org.elasticsearch.repositories.Repository#adaptUserMetadata(Map)}. + * + * @param userMetadata the snapshot metadata as received from the calling user + * @return the snapshot metadata containing the salted password hash of the node initializing the snapshot + */ + @Override + public Map adaptUserMetadata(Map userMetadata) { + // because populating the snapshot metadata must be done before the actual snapshot is first initialized, + // we take the opportunity to validate the license and abort if non-compliant + if (false == licenseStateSupplier.get().isEncryptedSnapshotAllowed()) { + throw LicenseUtils.newComplianceException("encrypted snapshots"); + } + Map snapshotUserMetadata = new HashMap<>(); + if (userMetadata != null) { + snapshotUserMetadata.putAll(userMetadata); + } + // pin down the salted hash of the repository password + // this is then checked before every snapshot operation (i.e. {@link #snapshotShard} and {@link #finalizeSnapshot}) + // to assure that all participating nodes in the snapshot have the same repository password set + snapshotUserMetadata.put(PASSWORD_HASH_RESERVED_USER_METADATA_KEY, this.repositoryPasswordSaltedHash); + return snapshotUserMetadata; + } + + @Override + public void finalizeSnapshot(SnapshotId snapshotId, ShardGenerations shardGenerations, long startTime, String failure, + int totalShards, List shardFailures, long repositoryStateId, + boolean includeGlobalState, MetaData clusterMetaData, Map userMetadata, + Version repositoryMetaVersion, ActionListener listener) { + try { + validateRepositoryPasswordHash(userMetadata); + // remove the repository password hash from the snapshot metadata, after all repository password verifications + // have completed, so that the hash is not displayed in the API response to the user + userMetadata = new HashMap<>(userMetadata); + userMetadata.remove(PASSWORD_HASH_RESERVED_USER_METADATA_KEY); + } catch (Exception passValidationException) { + listener.onFailure(passValidationException); + return; + } + super.finalizeSnapshot(snapshotId, shardGenerations, startTime, failure, totalShards, shardFailures, repositoryStateId, + includeGlobalState, clusterMetaData, userMetadata, repositoryMetaVersion, listener); + } + + @Override + public void snapshotShard(Store store, MapperService mapperService, SnapshotId snapshotId, IndexId indexId, + IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus, Version repositoryMetaVersion, + Map userMetadata, ActionListener listener) { + try { + validateRepositoryPasswordHash(userMetadata); + } catch (Exception passValidationException) { + listener.onFailure(passValidationException); + return; + } + super.snapshotShard(store, mapperService, snapshotId, indexId, snapshotIndexCommit, snapshotStatus, repositoryMetaVersion, + userMetadata, listener); + } + + @Override + public void cleanup(long repositoryStateId, Version repositoryMetaVersion, ActionListener listener) { + if (isReadOnly()) { + listener.onFailure(new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository")); + return; + } + final StepListener baseCleanupStep = new StepListener<>(); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + + super.cleanup(repositoryStateId, repositoryMetaVersion, baseCleanupStep); + + baseCleanupStep.whenComplete(baseCleanupResult -> { + final GroupedActionListener 
groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { + DeleteResult deleteResult = new DeleteResult(baseCleanupResult.blobs(), baseCleanupResult.bytes()); + for (DeleteResult result : deleteResults) { + deleteResult = deleteResult.add(result); + } + listener.onResponse(new RepositoryCleanupResult(deleteResult)); + }, listener::onFailure), 2); + + // clean unreferenced metadata blobs on the root blob container + executor.execute(ActionRunnable.supply(groupedListener, () -> { + EncryptedBlobContainer encryptedBlobContainer = (EncryptedBlobContainer) blobContainer(); + return cleanUnreferencedEncryptionMetadata(encryptedBlobContainer); + })); + + // clean indices blob containers + executor.execute(ActionRunnable.supply(groupedListener, () -> { + EncryptedBlobContainer indicesBlobContainer = (EncryptedBlobContainer) blobStore().blobContainer(indicesPath()); + Map metadataIndices = indicesBlobContainer.encryptionMetadataBlobContainer.children(); + Map dataIndices = indicesBlobContainer.delegatedBlobContainer.children(); + DeleteResult deleteResult = DeleteResult.ZERO; + for (Map.Entry metadataIndexContainer : metadataIndices.entrySet()) { + if (false == dataIndices.containsKey(metadataIndexContainer.getKey())) { + // the index metadata blob container exists but the encrypted data blob container does not + Long indexGeneration = findFirstGeneration(metadataIndexContainer.getValue()); + if (indexGeneration != null && indexGeneration < latestKnownRepoGen.get()) { + logger.debug("[{}] Found stale metadata index container [{}]. Cleaning it up", metadata.name(), + metadataIndexContainer.getValue().path()); + deleteResult = deleteResult.add(metadataIndexContainer.getValue().delete()); + logger.debug("[{}] Cleaned up stale metadata index container [{}]", metadata.name(), + metadataIndexContainer.getValue().path()); + } + } + } + return deleteResult; + })); + + }, listener::onFailure); + } + + @Override + protected BlobStore createBlobStore() { + return new EncryptedBlobStore(delegatedRepository, dataEncryptionKeySupplier, metadataEncryption, + encryptionNonceSupplier, metadataIdentifierSupplier); + } + + @Override + protected void doStart() { + this.delegatedRepository.start(); + super.doStart(); + } + + @Override + protected void doStop() { + super.doStop(); + this.delegatedRepository.stop(); + } + + @Override + protected void doClose() { + super.doClose(); + this.delegatedRepository.close(); + } + + @Override + protected long readSnapshotIndexLatestBlob() throws IOException { + // the index.latest blob is not encrypted + EncryptedBlobContainer encryptedBlobContainer = (EncryptedBlobContainer) blobContainer(); + return Numbers.bytesToLong(Streams.readFully(encryptedBlobContainer.delegatedBlobContainer.readBlob(INDEX_LATEST_BLOB)) + .toBytesRef()); + } + + @Override + protected void writeSnapshotIndexLatestBlob(long newGen) throws IOException { + final BytesReference genBytes; + try (BytesStreamOutput bStream = new BytesStreamOutput()) { + bStream.writeLong(newGen); + genBytes = bStream.bytes(); + } + // the index.latest blob is not encrypted + EncryptedBlobContainer encryptedBlobContainer = (EncryptedBlobContainer) blobContainer(); + try (InputStream stream = genBytes.streamInput()) { + encryptedBlobContainer.delegatedBlobContainer.writeBlobAtomic(INDEX_LATEST_BLOB, stream, genBytes.length(), false); + } + } + + private DeleteResult cleanUnreferencedEncryptionMetadata(EncryptedBlobContainer blobContainer) throws IOException { + Map allMetadataBlobs = 
blobContainer.encryptionMetadataBlobContainer.listBlobs();
+        Map<String, BlobMetaData> allDataBlobs = blobContainer.delegatedBlobContainer.listBlobs();
+        // map from the data blob name to all the associated metadata
+        Map<String, List<Tuple<MetadataIdentifier, String>>> metaDataByBlobName = new HashMap<>();
+        List<String> metadataBlobsToDelete = new ArrayList<>();
+        for (String metadataBlobName : allMetadataBlobs.keySet()) {
+            final Tuple<String, MetadataIdentifier> blobNameAndMetaId;
+            try {
+                blobNameAndMetaId = MetadataIdentifier.parseFromMetadataBlobName(metadataBlobName);
+            } catch (IllegalArgumentException e) {
+                // ignore invalid metadata blob names, which most likely have been created externally
+                logger.warn("Unrecognized blob name for metadata [" + metadataBlobName + "]", e);
+                continue;
+            }
+            if (false == allDataBlobs.containsKey(blobNameAndMetaId.v1()) &&
+                    blobNameAndMetaId.v2().repositoryGeneration < latestKnownRepoGen.get()) {
+                // the data blob for this metadata blob is not going to appear anymore: the repo has moved to a new generation,
+                // which means that a "parent" blob for it has already appeared
+                metadataBlobsToDelete.add(metadataBlobName);
+            }
+            // group metadata blobs by their associated blob name
+            metaDataByBlobName.computeIfAbsent(blobNameAndMetaId.v1(), k -> new ArrayList<>(1))
+                .add(new Tuple<>(blobNameAndMetaId.v2(), metadataBlobName));
+        }
+        metaDataByBlobName.entrySet().forEach(entry -> {
+            if (entry.getValue().size() > 1) {
+                // if there are multiple versions of the metadata, then remove the ones created in older repository generations,
+                // since overwrites cannot happen across repository generations
+                long maxRepositoryGeneration =
+                    entry.getValue().stream().map(meta -> meta.v1().repositoryGeneration).max(Long::compare).get();
+                entry.getValue().forEach(meta -> {
+                    if (meta.v1().repositoryGeneration < maxRepositoryGeneration) {
+                        metadataBlobsToDelete.add(meta.v2());
+                    }
+                });
+            }
+        });
+        logger.info("[{}] Found unreferenced metadata blobs {} at path {}. Cleaning them up", metadata.name(), metadataBlobsToDelete,
+            blobContainer.encryptionMetadataBlobContainer.path());
+        blobContainer.encryptionMetadataBlobContainer.deleteBlobsIgnoringIfNotExists(metadataBlobsToDelete);
+        return new DeleteResult(metadataBlobsToDelete.size(),
+            metadataBlobsToDelete.stream().mapToLong(name -> allMetadataBlobs.get(name).length()).sum());
+    }
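The generation rule applied above (when several metadata blobs exist for the same data blob, only the ones strictly older than the newest repository generation are safe to delete, because overwrites cannot span generations) can be seen in isolation in this sketch; the names and generation numbers are made up:

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

// Illustrative: three metadata blobs recorded for the same data blob, keyed by the
// repository generation embedded in their names; only the newest one is kept.
public class PruneSketch {
    public static void main(String[] args) {
        Map<Long, String> metaByGeneration = new TreeMap<>(Map.of(
                3L, "blob.AAAAAAAAAAAA.3", 5L, "blob.BBBBBBBBBBBB.5", 4L, "blob.CCCCCCCCCCCC.4"));
        long max = Collections.max(metaByGeneration.keySet());
        List<String> toDelete = new ArrayList<>();
        metaByGeneration.forEach((generation, name) -> {
            if (generation < max) {
                toDelete.add(name);
            }
        });
        System.out.println(toDelete); // [blob.AAAAAAAAAAAA.3, blob.CCCCCCCCCCCC.4]
    }
}
```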
+
+    // auxiliary function which infers the repository generation under which an index blob container has been created
+    private Long findFirstGeneration(BlobContainer metadataBlobContainer) throws IOException {
+        for (String metaBlobName : metadataBlobContainer.listBlobs().keySet()) {
+            try {
+                return MetadataIdentifier.parseFromMetadataBlobName(metaBlobName).v2().repositoryGeneration;
+            } catch (IllegalArgumentException e) {
+                // ignored, let's find another meta blob name we can parse
+            }
+        }
+        for (BlobContainer child : metadataBlobContainer.children().values()) {
+            Long generation = findFirstGeneration(child);
+            if (generation != null) {
+                return generation;
+            }
+        }
+        return null;
+    }
+
+    private static class EncryptedBlobStore implements BlobStore {
+
+        private final BlobStore delegatedBlobStore;
+        private final BlobPath delegatedBasePath;
+        private final Supplier<SecretKey> dataEncryptionKeySupplier;
+        private final PasswordBasedEncryption metadataEncryption;
+        private final Supplier<Integer> encryptionNonceSupplier;
+        private final Supplier<MetadataIdentifier> metadataIdentifierSupplier;
+
+        EncryptedBlobStore(BlobStoreRepository delegatedBlobStoreRepository, Supplier<SecretKey> dataEncryptionKeySupplier,
+                           PasswordBasedEncryption metadataEncryption, Supplier<Integer> encryptionNonceSupplier,
+                           Supplier<MetadataIdentifier> metadataIdentifierSupplier) {
+            this.delegatedBlobStore = delegatedBlobStoreRepository.blobStore();
+            this.delegatedBasePath = delegatedBlobStoreRepository.basePath();
+            this.dataEncryptionKeySupplier = dataEncryptionKeySupplier;
+            this.metadataEncryption = metadataEncryption;
+            this.encryptionNonceSupplier = encryptionNonceSupplier;
+            this.metadataIdentifierSupplier = metadataIdentifierSupplier;
+        }
+
+        @Override
+        public void close() throws IOException {
+            delegatedBlobStore.close();
+        }
+
+        @Override
+        public BlobContainer blobContainer(BlobPath path) {
+            return new EncryptedBlobContainer(delegatedBlobStore, delegatedBasePath, path, dataEncryptionKeySupplier, metadataEncryption,
+                encryptionNonceSupplier, metadataIdentifierSupplier);
+        }
+    }
+
+    private static class EncryptedBlobContainer implements BlobContainer {
+
+        private final BlobStore delegatedBlobStore;
+        private final BlobPath delegatedBasePath;
+        private final BlobPath path;
+        private final Supplier<SecretKey> dataEncryptionKeySupplier;
+        private final PasswordBasedEncryption metadataEncryption;
+        private final Supplier<Integer> encryptionNonceSupplier;
+        private final Supplier<MetadataIdentifier> metadataIdentifierSupplier;
+        private final BlobContainer delegatedBlobContainer;
+        private final BlobContainer encryptionMetadataBlobContainer;
+
+        EncryptedBlobContainer(BlobStore delegatedBlobStore, BlobPath delegatedBasePath, BlobPath path,
+                               Supplier<SecretKey> dataEncryptionKeySupplier, PasswordBasedEncryption metadataEncryption,
+                               Supplier<Integer> encryptionNonceSupplier, Supplier<MetadataIdentifier> metadataIdentifierSupplier) {
+            this.delegatedBlobStore = delegatedBlobStore;
+            this.delegatedBasePath = delegatedBasePath;
+            this.path = path;
+            this.dataEncryptionKeySupplier = dataEncryptionKeySupplier;
+            this.metadataEncryption = metadataEncryption;
+            this.encryptionNonceSupplier = encryptionNonceSupplier;
+            this.metadataIdentifierSupplier = metadataIdentifierSupplier;
+            this.delegatedBlobContainer = 
delegatedBlobStore.blobContainer(delegatedBasePath.append(path)); + this.encryptionMetadataBlobContainer = + delegatedBlobStore.blobContainer(delegatedBasePath.add(ENCRYPTION_METADATA_ROOT).append(path)); + } + + /** + * Returns the {@link BlobPath} to where the encrypted blobs are stored. Note that the encryption metadata is contained + * in separate blobs which are stored under a different blob path (see + * {@link #encryptionMetadataBlobContainer}). This blob path resembles the path of the encrypted + * blobs but is rooted under a specific path component (see {@link #ENCRYPTION_METADATA_ROOT}). The encryption is transparent + * in the sense that the metadata is not exposed by the {@link EncryptedBlobContainer}. + * + * @return the BlobPath to where the encrypted blobs are contained + */ + @Override + public BlobPath path() { + return path; + } + + /** + * Returns a new {@link InputStream} for the given {@code blobName} that can be used to read the contents of the blob. + * The returned {@code InputStream} transparently handles the decryption of the blob contents, by first working out + * the blob name of the associated metadata, reading and decrypting the metadata (given the repository password and utilizing + * the {@link PasswordBasedEncryption}) and lastly reading and decrypting the data blob, in a streaming fashion by employing the + * {@link DecryptionPacketsInputStream}. The {@code DecryptionPacketsInputStream} does not return un-authenticated data. + * + * @param blobName + * The name of the blob to get an {@link InputStream} for. + */ + @Override + public InputStream readBlob(String blobName) throws IOException { + // this requires two concurrent readBlob connections so it's possible that, under lab conditions, the storage service + // is saturated only by the first read connection of the pair, so that the second read connection (for the metadata) can not be + // fulfilled. In this case the second connection will time-out which will trigger the closing of the first one, therefore + // allowing other pair connections to complete. In this situation the restore process should slowly make headway, albeit under + // read-timeout exceptions + final InputStream encryptedDataInputStream = delegatedBlobContainer.readBlob(blobName); + try { + // read the metadata identifier (fixed length) which is prepended to the encrypted blob + final byte[] metaId = encryptedDataInputStream.readNBytes(MetadataIdentifier.byteLength()); + if (metaId.length != MetadataIdentifier.byteLength()) { + throw new IOException("Failure to read encrypted blob metadata identifier"); + } + final MetadataIdentifier metadataIdentifier = MetadataIdentifier.fromByteArray(metaId); + // the metadata blob name is the name of the data blob followed by the base64 encoding (URL safe) of the metadata identifier + final String metadataBlobName = MetadataIdentifier.formMetadataBlobName(blobName, metadataIdentifier); + // read the encrypted metadata contents + final BytesReference encryptedMetadataBytes = Streams.readFully(encryptionMetadataBlobContainer.readBlob(metadataBlobName)); + final BlobEncryptionMetadata metadata; + try { + // decrypt and parse metadata + metadata = BlobEncryptionMetadata.deserializeMetadata(BytesReference.toBytes(encryptedMetadataBytes), + metadataEncryption::decrypt); + } catch (IOException e) { + // friendlier exception message + String failureMessage = "Failure to decrypt metadata for blob [" + blobName + "]"; + if (e.getCause() instanceof AEADBadTagException) { + failureMessage = failureMessage + ". 
The repository password is probably wrong."; + } + throw new IOException(failureMessage, e); + } + // read and decrypt the data blob + return new DecryptionPacketsInputStream(encryptedDataInputStream, metadata.getDataEncryptionKey(), metadata.getNonce(), + metadata.getPacketLengthInBytes()); + } catch (Exception e) { + try { + encryptedDataInputStream.close(); + } catch (IOException closeEx) { + e.addSuppressed(closeEx); + } + throw e; + } + } + + /** + * Reads the blob content from the input stream and writes it to the container in a new blob with the given name. + * If {@code failIfAlreadyExists} is {@code true} and a blob with the same name already exists, the write operation will fail; + * otherwise, if {@code failIfAlreadyExists} is {@code false} the blob is overwritten. + * The contents are encrypted in a streaming fashion. The encryption key is randomly generated for each blob. + * The encryption key is separately stored in a metadata blob, which is encrypted with another key derived from the repository + * password. The metadata blob is stored first, before the encrypted data blob, so as to ensure that no encrypted data blobs + * are left without the associated metadata, in any failure scenario. + * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param blobSize + * The size of the blob to be written, in bytes. It is implementation dependent whether + * this value is used in writing the blob to the repository. + * @param failIfAlreadyExists + * whether to throw a FileAlreadyExistsException if the given blob already exists + */ + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { + final SecretKey dataEncryptionKey = dataEncryptionKeySupplier.get(); + final int nonce = encryptionNonceSupplier.get(); + // this is the metadata required to decrypt back the (soon to be) encrypted blob + final BlobEncryptionMetadata metadata = new BlobEncryptionMetadata(nonce, PACKET_LENGTH_IN_BYTES, dataEncryptionKey); + // encrypt the metadata + final byte[] encryptedMetadata; + try { + encryptedMetadata = BlobEncryptionMetadata.serializeMetadata(metadata, metadataEncryption::encrypt); + } catch (IOException e) { + throw new IOException("Failure to encrypt metadata for blob [" + blobName + "]", e); + } + // the metadata identifier is a sufficiently long random byte array so as to make it practically unique + // the goal is to avoid overwriting metadata blobs even if the encrypted data blobs are overwritten + final MetadataIdentifier metadataIdentifier = metadataIdentifierSupplier.get(); + final String metadataBlobName = MetadataIdentifier.formMetadataBlobName(blobName, metadataIdentifier); + // first write the encrypted metadata to a UNIQUE blob name + try (ByteArrayInputStream encryptedMetadataInputStream = new ByteArrayInputStream(encryptedMetadata)) { + encryptionMetadataBlobContainer.writeBlob(metadataBlobName, encryptedMetadataInputStream, encryptedMetadata.length, true + /* fail in the exceptional case of metadata blob name conflict */); + } + // afterwards write the encrypted data blob + // prepended to the encrypted data blob is the unique identifier (fixed length) of the metadata blob + final long encryptedBlobSize = (long) MetadataIdentifier.byteLength() + + EncryptionPacketsInputStream.getEncryptionLength(blobSize, PACKET_LENGTH_IN_BYTES); + try (InputStream 
encryptedInputStream = + ChainingInputStream.chain(new ByteArrayInputStream(metadataIdentifier.asByteArray()), + new EncryptionPacketsInputStream(inputStream, dataEncryptionKey, nonce, PACKET_LENGTH_IN_BYTES))) { + delegatedBlobContainer.writeBlob(blobName, encryptedInputStream, encryptedBlobSize, failIfAlreadyExists); + } + } + + @Override + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) + throws IOException { + // the encrypted repository does not offer an alternative implementation for atomic writes + // fallback to regular write + writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); + } + + @Override + public DeleteResult delete() throws IOException { + // first delete the encrypted data blob + DeleteResult deleteResult = delegatedBlobContainer.delete(); + // then delete metadata + try { + deleteResult = deleteResult.add(encryptionMetadataBlobContainer.delete()); + } catch (IOException e) { + // the encryption metadata blob container might not exist at all + logger.warn("Failure to delete metadata blob container " + encryptionMetadataBlobContainer.path(), e); + } + return deleteResult; + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { + Objects.requireNonNull(blobNames); + + // find all the blob names that must be deleted + Set blobNamesSet = new HashSet<>(blobNames); + Set blobNamesToDelete = new HashSet<>(); + for (String existingBlobName : delegatedBlobContainer.listBlobs().keySet()) { + if (blobNamesSet.contains(existingBlobName)) { + blobNamesToDelete.add(existingBlobName); + } + } + + // find all the metadata blob names that must be deleted + Map> blobNamesToMetadataNamesToDelete = new HashMap<>(blobNamesToDelete.size()); + Set allMetadataBlobNames = new HashSet<>(); + try { + allMetadataBlobNames = encryptionMetadataBlobContainer.listBlobs().keySet(); + } catch (IOException e) { + // the metadata blob container might not even exist + // the encrypted data is the "anchor" for encrypted blobs, if those are removed, the encrypted blob as a whole is + // considered removed, even if, technically, the metadata is still lingering (it should later be removed by cleanup) + // therefore this tolerates metadata delete failures, when data deletes are successful + logger.warn("Failure to list blobs of metadata blob container " + encryptionMetadataBlobContainer.path(), e); + } + for (String metadataBlobName : allMetadataBlobNames) { + final String blobNameForMetadata; + try { + blobNameForMetadata = MetadataIdentifier.parseFromMetadataBlobName(metadataBlobName).v1(); + } catch (IllegalArgumentException e) { + // ignore invalid metadata blob names, which most likely have been created externally + continue; + } + // group metadata blob names to their associated blob name + if (blobNamesToDelete.contains(blobNameForMetadata)) { + blobNamesToMetadataNamesToDelete.computeIfAbsent(blobNameForMetadata, k -> new ArrayList<>(1)) + .add(metadataBlobName); + } + } + // Metadata deletes when there are multiple for the same blob is un-safe, so don't try it now. + // It is unsafe because metadata "appears" before the data and there could be an overwrite in progress for which only + // the metadata, but not the encrypted data, shows up. 
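The write path above pins down the on-disk layout that this delete logic has to respect: every encrypted data blob starts with a 17-byte metadata identifier (a 9-byte random id plus a little-endian long repository generation), followed by the encrypted packets. A self-contained sketch of writing and reading that prefix back; the generation value is illustrative:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Illustrative reader for the 17-byte prefix that writeBlob prepends to every
// encrypted data blob (9-byte id, then a little-endian long generation).
public class PrefixSketch {
    public static void main(String[] args) throws IOException {
        ByteBuffer prefix = ByteBuffer.allocate(17).order(ByteOrder.LITTLE_ENDIAN);
        prefix.put(new byte[9]);  // the production code fills this with random bytes
        prefix.putLong(9, 42L);   // repository generation, at byte offset 9
        InputStream blob = new ByteArrayInputStream(prefix.array());
        byte[] metaId = blob.readNBytes(17); // what readBlob consumes before decrypting
        long generation = ByteBuffer.wrap(metaId).order(ByteOrder.LITTLE_ENDIAN).getLong(9);
        System.out.println("generation = " + generation); // 42
    }
}
```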
+            List<String> metadataBlobNamesToDelete = new ArrayList<>(blobNamesToMetadataNamesToDelete.size());
+            blobNamesToMetadataNamesToDelete.entrySet().forEach(entry -> {
+                if (entry.getValue().size() == 1) {
+                    metadataBlobNamesToDelete.add(entry.getValue().get(0));
+                }
+                // technically, duplicate metadata written during older repository generations could be removed here as well,
+                // but this code should not be aware of what a repository generation is, so let the metadata linger; it will
+                // be garbage collected by cleanup
+            });
+
+            // then delete the encrypted data blobs
+            delegatedBlobContainer.deleteBlobsIgnoringIfNotExists(new ArrayList<>(blobNamesToDelete));
+
+            // lastly delete metadata blobs
+            try {
+                encryptionMetadataBlobContainer.deleteBlobsIgnoringIfNotExists(metadataBlobNamesToDelete);
+            } catch (IOException e) {
+                logger.warn("Failure to delete metadata blobs " + metadataBlobNamesToDelete + " from blob container "
+                    + encryptionMetadataBlobContainer.path(), e);
+            }
+        }
+
+        @Override
+        public Map<String, BlobMetaData> listBlobs() throws IOException {
+            // The encrypted data blobs "anchor" the metadata-data blob pair, i.e. the encrypted blob "exists" only if the data exists.
+            // In all circumstances, barring an "external" access to the repository, the metadata associated with the data must exist.
+            return delegatedBlobContainer.listBlobs();
+        }
+
+        @Override
+        public Map<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
+            // The encrypted data blobs "anchor" the metadata-data blob pair, i.e. the encrypted blob "exists" only if the data exists.
+            // In all circumstances, barring an "external" access to the repository, the metadata associated with the data must exist.
+            return delegatedBlobContainer.listBlobsByPrefix(blobNamePrefix);
+        }
+
+        @Override
+        public Map<String, BlobContainer> children() throws IOException {
+            // the encrypted data blob container is the source-of-truth for child container operations
+            // the metadata blob container mirrors its structure, but in some failure cases it might contain
+            // additional orphaned metadata blobs
+            Map<String, BlobContainer> childEncryptedBlobContainers = delegatedBlobContainer.children();
+            Map<String, BlobContainer> result = new HashMap<>(childEncryptedBlobContainers.size());
+            for (Map.Entry<String, BlobContainer> encryptedBlobContainer : childEncryptedBlobContainers.entrySet()) {
+                if (encryptedBlobContainer.getValue().path().equals(encryptionMetadataBlobContainer.path())) {
+                    // do not descend recursively into the metadata blob container itself
+                    continue;
+                }
+                // get an encrypted blob container for each child
+                // Note that the encryption metadata blob container might be missing
+                result.put(encryptedBlobContainer.getKey(), new EncryptedBlobContainer(delegatedBlobStore, delegatedBasePath,
+                    path.add(encryptedBlobContainer.getKey()), dataEncryptionKeySupplier, metadataEncryption,
+                    encryptionNonceSupplier, metadataIdentifierSupplier));
+            }
+            return result;
+        }
+    }
+
+    private static String computeSaltedPBKDF2Hash(SecureRandom secureRandom, char[] password) {
+        byte[] salt = new byte[PASSWORD_HASH_SALT_LENGTH_IN_BYTES];
+        secureRandom.nextBytes(salt);
+        return computeSaltedPBKDF2Hash(salt, password);
+    }
+
+    private static String computeSaltedPBKDF2Hash(byte[] salt, char[] password) {
+        final PBEKeySpec spec = new PBEKeySpec(password, salt, SALTED_PASSWORD_HASH_ITER_COUNT, SALTED_PASSWORD_HASH_KEY_LENGTH_IN_BITS);
+        final byte[] hash;
+        try {
+            SecretKeyFactory pbkdf2KeyFactory = SecretKeyFactory.getInstance(SALTED_PASSWORD_HASH_ALGO);
+            hash = pbkdf2KeyFactory.generateSecret(spec).getEncoded();
+        } catch (InvalidKeySpecException | 
NoSuchAlgorithmException e) { + throw new RuntimeException("Unexpected exception when computing the hash of the repository password", e); + } + return new String(Base64.getUrlEncoder().withoutPadding().encode(salt), StandardCharsets.UTF_8) + ":" + + new String(Base64.getUrlEncoder().withoutPadding().encode(hash), StandardCharsets.UTF_8); + } + + /** + * Called before the shard snapshot and finalize operations, on the data and master nodes. This validates that the repository + * password hash of the master node that started the snapshot operation matches with the repository password on the data nodes. + * + * @param snapshotUserMetadata the snapshot metadata to verify + * @throws RepositoryException if the repository password on the local node mismatches or cannot be verified from the + * master's password hash from {@code snapshotUserMetadata} + */ + private void validateRepositoryPasswordHash(Map snapshotUserMetadata) throws RepositoryException { + if (snapshotUserMetadata == null) { + throw new RepositoryException(metadata.name(), "Unexpected fatal internal error", + new IllegalStateException("Null snapshot metadata")); + } + final Object repositoryPasswordHash = snapshotUserMetadata.get(PASSWORD_HASH_RESERVED_USER_METADATA_KEY); + if (repositoryPasswordHash == null || (false == repositoryPasswordHash instanceof String)) { + throw new RepositoryException(metadata.name(), "Unexpected fatal internal error", + new IllegalStateException("Snapshot metadata does not contain the repository password hash as a String")); + } + if (false == passwordHashVerifier.verify((String) repositoryPasswordHash)) { + throw new RepositoryException(metadata.name(), + "Repository password mismatch. The local node's value of the keystore secure setting [" + + EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(metadata.name()).getKey() + + "] is different from the elected master node, which started the snapshot operation"); + } + } + + /** + * This is used to verify that salted hashes match up with the {@code password} from the constructor argument. + * This also caches the last successfully verified hash, so that repeated checks for the same hash turn into a simple {@code String + * #equals}. 
+ */ + private static class HashVerifier { + // the password to which the salted hashes must match up with + private final char[] password; + // the last successfully matched salted hash + private final AtomicReference lastVerifiedHash; + + HashVerifier(char[] password) { + this.password = password; + this.lastVerifiedHash = new AtomicReference<>(null); + } + + boolean verify(String saltedHash) { + Objects.requireNonNull(saltedHash); + // first check if this exact hash has been checked before + if (saltedHash.equals(lastVerifiedHash.get())) { + logger.debug("The repository salted password hash [" + saltedHash + "] is locally cached as VALID"); + return true; + } + String[] parts = saltedHash.split(":"); + // the hash has an invalid format + if (parts == null || parts.length != 2) { + logger.error("Unrecognized format for the repository password hash [" + saltedHash + "]"); + return false; + } + String salt = parts[0]; + logger.debug("Computing repository password hash"); + String computedHash = computeSaltedPBKDF2Hash(Base64.getUrlDecoder().decode(salt.getBytes(StandardCharsets.UTF_8)), password); + if (false == computedHash.equals(saltedHash)) { + return false; + } + // remember last successfully verified hash + lastVerifiedHash.set(computedHash); + logger.debug("Repository password hash [" + saltedHash + "] validated successfully and is now locally cached"); + return true; + } + + } } diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java index 93e511bb14383..cab86a67b4aa5 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptedRepositoryPlugin.java @@ -6,26 +6,110 @@ package org.elasticsearch.repositories.encrypted; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.xpack.core.XPackPlugin; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Function; -public final class EncryptedRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { +// not-final for tests +public class EncryptedRepositoryPlugin extends Plugin implements RepositoryPlugin { - public EncryptedRepositoryPlugin(final Settings settings) { + static final Logger logger = LogManager.getLogger(EncryptedRepositoryPlugin.class); + public static final String 
REPOSITORY_TYPE_NAME = "encrypted";
+    static final String CIPHER_ALGO = "AES";
+    static final String RAND_ALGO = "SHA1PRNG";
+    // "public" because used in integ tests for other repository types
+    public static final Setting.AffixSetting<SecureString> ENCRYPTION_PASSWORD_SETTING = Setting.affixKeySetting("repository.encrypted.",
+        "password", key -> SecureSetting.secureString(key, null, Setting.Property.Consistent));
+    // "public" because used in integ tests for other repository types
+    public static final Setting<String> DELEGATE_TYPE = new Setting<>("delegate_type", "", Function.identity());
+
+    protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); }
+
+    public EncryptedRepositoryPlugin(Settings settings) {
+        if (false == getLicenseState().isEncryptedSnapshotAllowed()) {
+            logger.warn("Encrypted snapshot repositories are not allowed for the current license. " +
+                    "Snapshotting to any encrypted repository is not permitted and will fail.",
+                LicenseUtils.newComplianceException(EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME + " snapshot repository"));
+        }
     }
 
     @Override
     public List<Setting<?>> getSettings() {
-        return List.of();
+        return List.of(ENCRYPTION_PASSWORD_SETTING);
     }
 
     @Override
-    public void reload(Settings settings) {
-        // Secure settings should be readable inside this method.
+    public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry,
+                                                           final ClusterService clusterService) {
+        // cache all the passwords for encrypted repositories while keystore-based secure passwords are still readable
+        final Map<String, char[]> cachedRepositoryPasswords = new HashMap<>();
+        for (String repositoryName : ENCRYPTION_PASSWORD_SETTING.getNamespaces(env.settings())) {
+            Setting<SecureString> encryptionPasswordSetting = ENCRYPTION_PASSWORD_SETTING
+                .getConcreteSettingForNamespace(repositoryName);
+            SecureString encryptionPassword = encryptionPasswordSetting.get(env.settings());
+            cachedRepositoryPasswords.put(repositoryName, encryptionPassword.getChars());
+        }
+        return Collections.singletonMap(REPOSITORY_TYPE_NAME, new Repository.Factory() {
+
+            @Override
+            public Repository create(RepositoryMetaData metadata) {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public Repository create(RepositoryMetaData metaData, Function<String, Repository.Factory> typeLookup) throws Exception {
+                if (false == getLicenseState().isEncryptedSnapshotAllowed()) {
+                    logger.warn("Encrypted snapshots are not allowed for the currently installed license. " +
+                            "Snapshots to the [" + metaData.name() + "] encrypted repository are not permitted. " +
+                            "All the other operations, including restore, are still permitted.",
+                        LicenseUtils.newComplianceException("encrypted snapshots"));
+                }
+                String delegateType = DELEGATE_TYPE.get(metaData.settings());
+                if (Strings.hasLength(delegateType) == false) {
+                    throw new IllegalArgumentException(DELEGATE_TYPE.getKey() + " must be set");
+                }
+                if (REPOSITORY_TYPE_NAME.equals(delegateType)) {
+                    throw new IllegalArgumentException("Cannot encrypt an already encrypted repository. 
" + DELEGATE_TYPE.getKey() + + " must not be equal to " + REPOSITORY_TYPE_NAME); + } + Repository.Factory factory = typeLookup.apply(delegateType); + if (null == factory) { + throw new IllegalArgumentException("Unsupported delegate type " + DELEGATE_TYPE.getKey()); + } + Repository delegatedRepository = factory.create(new RepositoryMetaData(metaData.name(), + delegateType, metaData.settings())); + if (false == (delegatedRepository instanceof BlobStoreRepository) || delegatedRepository instanceof EncryptedRepository) { + throw new IllegalArgumentException("Unsupported delegate type " + DELEGATE_TYPE.getKey()); + } + final char[] repositoryPassword = cachedRepositoryPasswords.get(metaData.name()); + if (repositoryPassword == null) { + throw new IllegalArgumentException( + ENCRYPTION_PASSWORD_SETTING.getConcreteSettingForNamespace(metaData.name()).getKey() + " must be set"); + } + return new EncryptedRepository(metaData, registry, clusterService, (BlobStoreRepository) delegatedRepository, + () -> getLicenseState(), repositoryPassword); + } + }); } } diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java index a45569c2f9632..0c39da326077d 100644 --- a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStream.java @@ -6,6 +6,8 @@ package org.elasticsearch.repositories.encrypted; +import org.elasticsearch.core.internal.io.IOUtils; + import javax.crypto.Cipher; import javax.crypto.CipherInputStream; import javax.crypto.NoSuchPaddingException; @@ -171,10 +173,22 @@ public void reset() throws IOException { } } + @Override + public void close() throws IOException { + Exception superException = null; + try { + super.close(); + } catch (IOException e) { + superException = e; + } finally { + IOUtils.close(superException, source); + } + } + private static Cipher getPacketEncryptionCipher(SecretKey secretKey, byte[] packetIv) throws IOException { GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(EncryptedRepository.GCM_TAG_LENGTH_IN_BYTES * Byte.SIZE, packetIv); try { - Cipher packetCipher = Cipher.getInstance(EncryptedRepository.GCM_ENCRYPTION_SCHEME); + Cipher packetCipher = Cipher.getInstance(EncryptedRepository.DATA_ENCRYPTION_SCHEME); packetCipher.init(Cipher.ENCRYPT_MODE, secretKey, gcmParameterSpec); return packetCipher; } catch (NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | InvalidAlgorithmParameterException e) { diff --git a/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryption.java b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryption.java new file mode 100644 index 0000000000000..86c3a24171a9b --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/main/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryption.java @@ -0,0 +1,239 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.repositories.encrypted; + +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.cache.Cache; +import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.collect.Tuple; + +import javax.crypto.Cipher; +import javax.crypto.SecretKey; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.GCMParameterSpec; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.util.Arrays; +import java.util.Base64; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Encrypts and decrypts using a password. Decryption authenticates the cyphertext so as to make sure that + * the same password has been used during encryption (the cipher mode is AES/GCM/NoPadding). The caller must + * ensure that the password and the ciphertext are not stored on the same "medium" (storage partition). + *
+ * <p>
+ * The {@code password} constructor argument is used to generate 256-bit wide AES keys using the PBKDF2 algorithm. + * The "salt", which is the other required parameter of PBKDF2, is generated randomly (32 bytes wide) using a + * {@code SecureRandom} instance. Unlike the password, the "salt" is not a secret; it is used to generate different + * keys starting from the same password.
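Spelled out with standard JCA calls, the derivation described above looks roughly like the following sketch (the literal values mirror the KDF_ALGO, KDF_ITER, SALT_LENGTH_IN_BYTES and KEY_SIZE_IN_BITS constants defined further down in this file; the snippet assumes a char[] password in scope and is illustrative, not code from this change):

    byte[] salt = new byte[32];
    new SecureRandom().nextBytes(salt); // the salt is random, but it is not a secret
    PBEKeySpec keySpec = new PBEKeySpec(password, salt, 61616, 256);
    SecretKey derived = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512").generateSecret(keySpec);
    SecretKey aesKey = new SecretKeySpec(derived.getEncoded(), "AES"); // 256-bit AES key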
+ * <p>
+ * A new encryption key is generated for every {@link PasswordBasedEncryption} instance (using a newly generated random + * "salt"). The key is then reused for at most {@link #ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY} encryption invocations; + * when the limit is exceeded, a new key is computed from a newly generated "salt". To support decryption, the "salt" + * is prepended to the returned ciphertext. Decryption reads in the "salt" and uses the secret password to regenerate + * the same key, with which it decrypts and authenticates the ciphertext. The key thus computed is cached locally for + * possible reuse, because generating the key from the password is (by design) an expensive operation.
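The blob layout this implies is salt, then IV, then cipher output (sizes per the constants below), so the decrypting side can recover the salt with no shared state. For illustration (encryptedBlob is a hypothetical name, not from this change):

    // [ 32-byte salt | 12-byte IV | ciphertext incl. 16-byte GCM tag ]
    byte[] salt = Arrays.copyOf(encryptedBlob, 32);
    byte[] iv = Arrays.copyOfRange(encryptedBlob, 32, 32 + 12);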
+ * <p>
+ * There is an encryption invocation limit for the same key because the AES/GCM/NoPadding encryption mode must never be + * used with the same key and the same Initialization Vector. During encryption, {@link PasswordBasedEncryption} + * randomly generates a new 12-byte IV, so in order to limit the risk of an IV collision, the key must be changed + * after at most {@link #ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY} IVs have been generated and used with that same key. For + * more details, see Section 8.3 of https://csrc.nist.gov/publications/detail/sp/800-38d/final .
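As a back-of-the-envelope check of that limit (the reasoning is assumed from NIST SP 800-38D, not stated in this change): with IVs drawn uniformly at random from 2^96 values, the probability of a collision among n IVs is roughly n^2 / 2^97 by the birthday bound; requiring at most 2^-32 gives n <= 2^32, and the code halves that to 2^31, for collision odds of about 2^62 / 2^97 = 2^-35.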
+ * <p>
+ * {@code PasswordBasedEncryption} objects are safe for use by multiple concurrent threads. + */ +public final class PasswordBasedEncryption { + + // the maximum number of computed keys that are cached, so as to avoid re-computation + static final int ENCRYPTION_KEY_CACHE_SIZE = 512; + // The cipher used to encrypt the data + static final String CIPHER_ALGO = "AES"; + // The mode used with the cipher algorithm + private static final String CIPHER_MODE = "GCM"; + // The padding used with the cipher algorithm + private static final String CIPHER_PADDING = "NoPadding"; + // the KDF algorithm that generates the symmetric key given the password + private static final String KDF_ALGO = "PBKDF2WithHmacSHA512"; + // iteration count parameter for the KDF function; deliberately an unusual value larger than 60k + private static final int KDF_ITER = 61616; + // the salt, which is generated randomly, is another parameter for the KDF function + private static final int SALT_LENGTH_IN_BYTES = 32; + // the key encryption key is 256 bits wide (AES-256) + private static final int KEY_SIZE_IN_BITS = 256; + // the GCM cipher mode uses a 12-byte wide IV + private static final int IV_LENGTH_IN_BYTES = 12; + // the GCM cipher mode generates a 16-byte wide authentication tag + private static final int TAG_LENGTH_IN_BYTES = 16; + // according to NIST SP 800-38D https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-38d.pdf + // the number of encrypt operations using the same key, when the IV is generated randomly, should be limited + // to less than 2^32 (so we use 2^31 to err on the safe side) + private static final long ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY = 1L << 31; + + // the password used to generate all the encryption and decryption keys (the keys differ because each + // is generated using a different salt, which is generated randomly) + private final char[] password; + // this is used to generate the IVs for each encryption invocation as well as the salt for every key generation + private final SecureRandom secureRandom; // this is thread-safe + // caches the computed secret keys, keyed by the salt that was used to generate them + private final Cache<String, Tuple<byte[], SecretKey>> keyBySaltCache; + // supplies the salt of the secret key currently used for encryption + private final AtomicReference<LimitedSupplier<String>> currentEncryptionKeySalt; + + public PasswordBasedEncryption(char[] password, SecureRandom secureRandom) { + this.password = password; + this.secureRandom = secureRandom; + this.keyBySaltCache = CacheBuilder.<String, Tuple<byte[], SecretKey>>builder() + .setMaximumWeight(ENCRYPTION_KEY_CACHE_SIZE) + .build(); + // set the random salt which is used to generate the encryption key + byte[] randomEncryptionKeySaltBytes = new byte[SALT_LENGTH_IN_BYTES]; + secureRandom.nextBytes(randomEncryptionKeySaltBytes); + this.currentEncryptionKeySalt = new AtomicReference<>(new LimitedSupplier<>( + new String(Base64.getEncoder().encode(randomEncryptionKeySaltBytes), StandardCharsets.UTF_8), + ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY)); + } + + public byte[] encrypt(byte[] data, @Nullable byte[] associatedData) throws ExecutionException, GeneralSecurityException { + Objects.requireNonNull(data); + // retrieve the encryption key + Tuple<byte[], SecretKey> saltAndEncryptionKey = useEncryptionKey(); + // generate the IV randomly + byte[] iv = new byte[IV_LENGTH_IN_BYTES]; + secureRandom.nextBytes(iv); + // create the encryption cipher + GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BYTES * Byte.SIZE, iv); + Cipher cipher = Cipher.getInstance(CIPHER_ALGO + "/" + CIPHER_MODE + "/" + CIPHER_PADDING); +
cipher.init(Cipher.ENCRYPT_MODE, saltAndEncryptionKey.v2(), gcmParameterSpec); + // update the cipher with the associated data + if (associatedData != null && associatedData.length > 0) { + cipher.updateAAD(associatedData); + } + // encrypt the data + byte[] encryptedData = cipher.doFinal(data); + // concatenate the key salt, the IV and the ciphertext + byte[] resultCiphertext = new byte[saltAndEncryptionKey.v1().length + iv.length + encryptedData.length]; + // prepend the salt + System.arraycopy(saltAndEncryptionKey.v1(), 0, resultCiphertext, 0, saltAndEncryptionKey.v1().length); + // follow up with the IV + System.arraycopy(iv, 0, resultCiphertext, saltAndEncryptionKey.v1().length, iv.length); + // and finally conclude the result with the ciphertext (the output of the cipher) + System.arraycopy(encryptedData, 0, resultCiphertext, saltAndEncryptionKey.v1().length + iv.length, encryptedData.length); + return resultCiphertext; + } + + public byte[] decrypt(byte[] encryptedData, @Nullable byte[] associatedData) throws ExecutionException, GeneralSecurityException { + if (Objects.requireNonNull(encryptedData).length < SALT_LENGTH_IN_BYTES + IV_LENGTH_IN_BYTES + TAG_LENGTH_IN_BYTES) { + throw new IllegalArgumentException("Ciphertext too short"); + } + // extract the salt prepended to the ciphertext + byte[] salt = Arrays.copyOf(encryptedData, SALT_LENGTH_IN_BYTES); + // get the key associated with the salt + SecretKey decryptionKey = getKeyFromSalt(new String(Base64.getEncoder().encode(salt), StandardCharsets.UTF_8)).v2(); + // construct and initialize the decryption cipher + GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(TAG_LENGTH_IN_BYTES * Byte.SIZE, encryptedData, SALT_LENGTH_IN_BYTES, + IV_LENGTH_IN_BYTES); + Cipher cipher = Cipher.getInstance(CIPHER_ALGO + "/" + CIPHER_MODE + "/" + CIPHER_PADDING); + cipher.init(Cipher.DECRYPT_MODE, decryptionKey, gcmParameterSpec); + // update the cipher with the associated data + if (associatedData != null && associatedData.length > 0) { + cipher.updateAAD(associatedData); + } + // decrypt the data + return cipher.doFinal(encryptedData, SALT_LENGTH_IN_BYTES + IV_LENGTH_IN_BYTES, + encryptedData.length - SALT_LENGTH_IN_BYTES - IV_LENGTH_IN_BYTES); + } + + private SecretKey generatePasswordBasedSecretKey(char[] password, byte[] salt) throws NoSuchAlgorithmException, + InvalidKeySpecException { + PBEKeySpec keySpec = new PBEKeySpec(password, salt, KDF_ITER, KEY_SIZE_IN_BITS); + SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(KDF_ALGO); + SecretKey secretKey = keyFactory.generateSecret(keySpec); + SecretKeySpec secret = new SecretKeySpec(secretKey.getEncoded(), CIPHER_ALGO); + return secret; + } + + /** + * Returns the secret key for the given salt; the key is computed and cached if it is not already present in the cache. + */ + private Tuple<byte[], SecretKey> getKeyFromSalt(String salt) throws ExecutionException { + return this.keyBySaltCache.computeIfAbsent(salt, (ignore) -> { + byte[] saltBytes = Base64.getDecoder().decode(salt); + SecretKey secretKey = generatePasswordBasedSecretKey(password, saltBytes); + return new Tuple<>(saltBytes, secretKey); + }); + } + + /** + * Replaces the currently exhausted salt supplier with a new one. The new salt is generated randomly.
+ */ + private void resetCurrentEncryptionKeySalt(LimitedSupplier<String> currentExhaustedSupplier) { + // generate a new random salt + byte[] randomEncryptionKeySaltBytes = new byte[SALT_LENGTH_IN_BYTES]; + secureRandom.nextBytes(randomEncryptionKeySaltBytes); + LimitedSupplier<String> newSaltSupplier = new LimitedSupplier<>( + new String(Base64.getEncoder().encode(randomEncryptionKeySaltBytes), StandardCharsets.UTF_8), + ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY); + // replace the old salt supplier with the new one + this.currentEncryptionKeySalt.compareAndExchange(currentExhaustedSupplier, newSaltSupplier); + } + + private Tuple<byte[], SecretKey> useEncryptionKey() throws ExecutionException { + // get the salt of the encryption key + LimitedSupplier<String> currentEncryptionKeySaltSupplier = currentEncryptionKeySalt.get(); + Optional<String> encryptionKeySalt = currentEncryptionKeySaltSupplier.get(); + if (encryptionKeySalt.isPresent()) { + // the salt has NOT yet been used more than ENCRYPT_INVOKE_LIMIT_USING_SAME_KEY times + return getKeyFromSalt(encryptionKeySalt.get()); + } + // the supplier is exhausted; install a new salt, from which a new encryption key is generated + resetCurrentEncryptionKeySalt(currentEncryptionKeySaltSupplier); + // try again with the new supplier + return useEncryptionKey(); + } + + /** + * A supplier that accepts a limited number of retrieve ({@code get}) invocations. After the limit has been exceeded + * the supplier returns {@link Optional#empty()}. + */ + static class LimitedSupplier<T> implements Supplier<Optional<T>> { + // the current count of {@code get} invocations + private final AtomicLong count; + // the constant value to return while the invocation count has not been exceeded + private final Optional<T> value; + private final long limit; + + LimitedSupplier(T value, long limit) { + if (limit <= 0) { + throw new IllegalArgumentException("limit argument must be strictly positive"); + } + this.count = new AtomicLong(0L); + this.value = Optional.of(Objects.requireNonNull(value)); + this.limit = limit; + } + + @Override + public Optional<T> get() { + long invocationCount = count.getAndUpdate(prev -> prev < limit ? prev + 1 : limit); + if (invocationCount < limit) { + return value; + } + return Optional.empty(); + } + + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java new file mode 100644 index 0000000000000..7dfbda5c15d85 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptedFSBlobStoreRepositoryIntegTests.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.repositories.fs.FsRepository; +import org.junit.BeforeClass; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public class EncryptedFSBlobStoreRepositoryIntegTests extends ESBlobStoreRepositoryIntegTestCase { + + private static List<String> repositoryNames; + + @BeforeClass + private static void preGenerateRepositoryNames() { + List<String> names = new ArrayList<>(); + for (int i = 0; i < 32; i++) { + names.add("test-repo-" + i); + } + repositoryNames = Collections.synchronizedList(names); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), License.LicenseType.TRIAL.getTypeName()) + .setSecureSettings(nodeSecureSettings(nodeOrdinal)) + .build(); + } + + protected MockSecureSettings nodeSecureSettings(int nodeOrdinal) { + MockSecureSettings secureSettings = new MockSecureSettings(); + for (String repositoryName : repositoryNames) { + secureSettings.setString(EncryptedRepositoryPlugin.ENCRYPTION_PASSWORD_SETTING.
+ getConcreteSettingForNamespace(repositoryName).getKey(), "password" + repositoryName); + } + return secureSettings; + } + + @Override + protected String randomRepositoryName() { + return repositoryNames.remove(randomIntBetween(0, repositoryNames.size() - 1)); + } + + protected long blobLengthFromDiskLength(BlobMetaData blobMetaData) { + if (BlobStoreRepository.INDEX_LATEST_BLOB.equals(blobMetaData.name())) { + // index.latest is not encrypted, hence the size on disk is equal to the content length + return blobMetaData.length(); + } else { + return DecryptionPacketsInputStream.getDecryptionLength(blobMetaData.length() - + EncryptedRepository.MetadataIdentifier.byteLength(), EncryptedRepository.PACKET_LENGTH_IN_BYTES); + } + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(LocalStateEncryptedRepositoryPlugin.class); + } + + @Override + protected String repositoryType() { + return EncryptedRepositoryPlugin.REPOSITORY_TYPE_NAME; + } + + @Override + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put(super.repositorySettings()); + settings.put("location", randomRepoPath()); + settings.put(EncryptedRepositoryPlugin.DELEGATE_TYPE.getKey(), FsRepository.TYPE); + if (randomBoolean()) { + long size = 1 << randomInt(10); + settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB)); + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java index 90ec1b677c141..a25514eb4d2d5 100644 --- a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/EncryptionPacketsInputStreamTests.java @@ -110,10 +110,10 @@ public void testShortPacketSizes() throws Exception { public void testPacketSizeMultipleOfAESBlockSize() throws Exception { int packetSize = 1 + Randomness.get().nextInt(8); - testEncryptPacketWise(1 + Randomness.get().nextInt(packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES), - packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES, new DefaultBufferedReadAllStrategy()); - testEncryptPacketWise(packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES + Randomness.get().nextInt(8192), - packetSize * EncryptedRepository.AES_BLOCK_SIZE_IN_BYTES, new DefaultBufferedReadAllStrategy()); + testEncryptPacketWise(1 + Randomness.get().nextInt(packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES), + packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES, new DefaultBufferedReadAllStrategy()); + testEncryptPacketWise(packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES + Randomness.get().nextInt(8192), + packetSize * EncryptedRepository.AES_BLOCK_LENGTH_IN_BYTES, new DefaultBufferedReadAllStrategy()); } public void testMarkAndResetPacketBoundaryNoMock() throws Exception { @@ -394,7 +394,7 @@ private void testEncryptPacketWise(int size, int packetSize, ReadStrategy readSt GCMParameterSpec gcmParameterSpec = new GCMParameterSpec(EncryptedRepository.GCM_TAG_LENGTH_IN_BYTES * Byte.SIZE, Arrays.copyOfRange(ciphertextArray, ciphertextOffset, ciphertextOffset + EncryptedRepository.GCM_IV_LENGTH_IN_BYTES)); - Cipher packetCipher =
Cipher.getInstance(EncryptedRepository.GCM_ENCRYPTION_SCHEME); + Cipher packetCipher = Cipher.getInstance(EncryptedRepository.DATA_ENCRYPTION_SCHEME); packetCipher.init(Cipher.DECRYPT_MODE, secretKey, gcmParameterSpec); try (InputStream packetDecryptionInputStream = new CipherInputStream(new ByteArrayInputStream(ciphertextArray, ciphertextOffset + EncryptedRepository.GCM_IV_LENGTH_IN_BYTES, diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java new file mode 100644 index 0000000000000..cd1cae28ffd28 --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/LocalStateEncryptedRepositoryPlugin.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class LocalStateEncryptedRepositoryPlugin extends LocalStateCompositeXPackPlugin { + + final EncryptedRepositoryPlugin encryptedRepositoryPlugin; + + public LocalStateEncryptedRepositoryPlugin(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + LocalStateEncryptedRepositoryPlugin thisVar = this; + + encryptedRepositoryPlugin = new EncryptedRepositoryPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return thisVar.getLicenseState(); + } + }; + plugins.add(encryptedRepositoryPlugin); + } + +} diff --git a/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryptionTests.java b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryptionTests.java new file mode 100644 index 0000000000000..79b8a0c73af4b --- /dev/null +++ b/x-pack/plugin/repository-encrypted/src/test/java/org/elasticsearch/repositories/encrypted/PasswordBasedEncryptionTests.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.repositories.encrypted; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; + +import java.security.SecureRandom; + +public class PasswordBasedEncryptionTests extends ESTestCase { + + public void testEncryptAndDecryptEmpty() throws Exception { + PasswordBasedEncryption encryptor = new PasswordBasedEncryption(new char[] {'p', 'a', 's', 's'}, + SecureRandom.getInstance("SHA1PRNG")); + byte[] emptyEncrypted = encryptor.encrypt(new byte[0], null); + byte[] ans = encryptor.decrypt(emptyEncrypted, null); + assertThat(ans.length, Matchers.is(0)); + } + +}
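A natural companion to the empty-input round-trip above, sketched here for illustration only (it is not part of this change; randomByteArrayOfLength, randomIntBetween and randomBoolean are ESTestCase helpers):

    public void testEncryptAndDecryptRoundTrip() throws Exception {
        PasswordBasedEncryption encryptor = new PasswordBasedEncryption(new char[] {'p', 'a', 's', 's'},
            SecureRandom.getInstance("SHA1PRNG"));
        byte[] data = randomByteArrayOfLength(randomIntBetween(1, 128));
        byte[] aad = randomBoolean() ? null : randomByteArrayOfLength(8);
        // decrypting with the same password and the same associated data must return the original bytes
        assertArrayEquals(data, encryptor.decrypt(encryptor.encrypt(data, aad), aad));
    }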