diff --git a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index d25798ad071bd..0b62243e66afe 100644 --- a/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/build-tools/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -1139,7 +1139,7 @@ private void logFileContents(String description, Path from, boolean tailLogs) { } } if (foundLeaks) { - throw new TestClustersException("Found resource leaks in node logs."); + throw new TestClustersException("Found resource leaks in node log: " + from); } } diff --git a/docs/changelog/109636.yaml b/docs/changelog/109636.yaml new file mode 100644 index 0000000000000..f8f73a75dfd3d --- /dev/null +++ b/docs/changelog/109636.yaml @@ -0,0 +1,5 @@ +pr: 109636 +summary: "Ensure a lazy rollover request will rollover the target data stream once." +area: Data streams +type: bug +issues: [] diff --git a/docs/changelog/109848.yaml b/docs/changelog/109848.yaml new file mode 100644 index 0000000000000..858bbe84ef3a4 --- /dev/null +++ b/docs/changelog/109848.yaml @@ -0,0 +1,5 @@ +pr: 109848 +summary: Denser in-memory representation of `ShardBlobsToDelete` +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/109931.yaml b/docs/changelog/109931.yaml new file mode 100644 index 0000000000000..3575cfd49176f --- /dev/null +++ b/docs/changelog/109931.yaml @@ -0,0 +1,5 @@ +pr: 109931 +summary: Apply FLS to the contents of `IgnoredSourceFieldMapper` +area: Mapping +type: enhancement +issues: [] diff --git a/docs/reference/features/apis/reset-features-api.asciidoc b/docs/reference/features/apis/reset-features-api.asciidoc index d8ba0832cc2ad..2d2c7da039ea1 100644 --- a/docs/reference/features/apis/reset-features-api.asciidoc +++ b/docs/reference/features/apis/reset-features-api.asciidoc @@ -34,6 +34,11 @@ To list the features 
that will be affected, use the < snapshot"); ActionFuture future = client1.admin() .cluster() - .prepareCreateSnapshot(repositoryName, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, SNAPSHOT) .setIndices(dataStream) .setWaitForCompletion(true) .setPartial(false) @@ -1036,13 +1057,16 @@ public void testCloneSnapshotThatIncludesDataStream() throws Exception { assertSuccessful( client.admin() .cluster() - .prepareCreateSnapshot(REPO, sourceSnapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, sourceSnapshotName) .setWaitForCompletion(true) .setIndices("ds", indexWithoutDataStream) .setIncludeGlobalState(false) .execute() ); - assertAcked(clusterAdmin().prepareCloneSnapshot(REPO, sourceSnapshotName, "target-snapshot-1").setIndices(indexWithoutDataStream)); + assertAcked( + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, REPO, sourceSnapshotName, "target-snapshot-1") + .setIndices(indexWithoutDataStream) + ); } public void testPartialRestoreSnapshotThatIncludesDataStream() { @@ -1053,7 +1077,7 @@ public void testPartialRestoreSnapshotThatIncludesDataStream() { assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream)); RestoreInfo restoreInfo = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, snapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot) .setIndices(indexWithoutDataStream) .setWaitForCompletion(true) .setRestoreGlobalState(false) @@ -1078,7 +1102,7 @@ public void testPartialRestoreSnapshotThatIncludesDataStreamWithGlobalState() { assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream)); RestoreInfo restoreInfo = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, snapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot) .setIndices(indexWithoutDataStream) .setWaitForCompletion(true) .setRestoreGlobalState(true) @@ -1096,11 +1120,11 @@ public void testSnapshotDSDuringRollover() throws Exception { final boolean partial = 
randomBoolean(); blockAllDataNodes(repoName); final String snapshotName = "ds-snap"; - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .setPartial(partial) - .setIncludeGlobalState(randomBoolean()) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).setPartial(partial).setIncludeGlobalState(randomBoolean()).execute(); waitForBlockOnAnyDataNode(repoName); awaitNumberOfSnapshotsInProgress(1); final ActionFuture rolloverResponse = indicesAdmin().rolloverIndex(new RolloverRequest("ds", null)); @@ -1117,7 +1141,7 @@ public void testSnapshotDSDuringRollover() throws Exception { assertThat(snapshotInfo.dataStreams(), hasItems("ds")); assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "ds" })).get()); - RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) + RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setIndices("ds") .get() @@ -1134,11 +1158,11 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception { createRepository(repoName, "mock"); blockAllDataNodes(repoName); final String snapshotName = "ds-snap"; - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .setPartial(true) - .setIncludeGlobalState(randomBoolean()) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).setPartial(true).setIncludeGlobalState(randomBoolean()).execute(); waitForBlockOnAnyDataNode(repoName); awaitNumberOfSnapshotsInProgress(1); final RolloverResponse rolloverResponse = 
indicesAdmin().rolloverIndex(new RolloverRequest("ds", null)).get(); @@ -1157,7 +1181,7 @@ public void testSnapshotDSDuringRolloverAndDeleteOldIndex() throws Exception { ); assertAcked(client().execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(new String[] { "other-ds" }))); - RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) + RestoreInfo restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setIndices("other-ds") .get() @@ -1176,7 +1200,7 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndices() { assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream)); RestoreInfo restoreInfo = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, snapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot) .setWaitForCompletion(true) .setRestoreGlobalState(false) .get() @@ -1201,7 +1225,7 @@ public void testExcludeDSFromSnapshotWhenExcludingItsIndicesWithGlobalState() { assertAcked(client.admin().indices().prepareDelete(indexWithoutDataStream)); RestoreInfo restoreInfo = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, snapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshot) .setWaitForCompletion(true) .setRestoreGlobalState(true) .get() @@ -1221,7 +1245,7 @@ public void testRestoreSnapshotFully() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName) .setWaitForCompletion(true) .get(); assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); @@ -1241,7 +1265,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var e = expectThrows( IllegalStateException.class, - 
client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) + client.admin().cluster().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } finally { @@ -1264,7 +1288,7 @@ public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Excep var e = expectThrows( IllegalStateException.class, - client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) + client.admin().cluster().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)")); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java new file mode 100644 index 0000000000000..89d576e74be2f --- /dev/null +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/LazyRolloverDuringDisruptionIT.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequestBuilder; +import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; +import org.elasticsearch.action.datastreams.CreateDataStreamAction; +import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; +import org.elasticsearch.test.disruption.SingleNodeDisruption; +import org.elasticsearch.xcontent.XContentType; + +import java.util.Collection; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) +public class LazyRolloverDuringDisruptionIT extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return List.of(DataStreamsPlugin.class); + } + + public void testRolloverIsExecutedOnce() throws ExecutionException, InterruptedException { + String masterNode = internalCluster().startMasterOnlyNode(); + internalCluster().startDataOnlyNodes(3); + ensureStableCluster(4); + + String dataStreamName = "my-data-stream"; + createDataStream(dataStreamName); + + // Mark it to lazy rollover + new RolloverRequestBuilder(client()).setRolloverTarget(dataStreamName).lazy(true).execute().get(); + 
+ // Verify that the data stream is marked for rollover and that it has currently one index + DataStream dataStream = getDataStream(dataStreamName); + assertThat(dataStream.rolloverOnWrite(), equalTo(true)); + assertThat(dataStream.getBackingIndices().getIndices().size(), equalTo(1)); + + // Introduce a disruption to the master node that should delay the rollover execution + SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), masterNode, 100, 200, 30000, 60000); + internalCluster().setDisruptionScheme(masterNodeDisruption); + masterNodeDisruption.startDisrupting(); + + // Start indexing operations + int docs = randomIntBetween(5, 10); + CountDownLatch countDownLatch = new CountDownLatch(docs); + for (int i = 0; i < docs; i++) { + var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE); + final String doc = "{ \"@timestamp\": \"2099-05-06T16:21:15.000Z\", \"message\": \"something cool happened\" }"; + indexRequest.source(doc, XContentType.JSON); + client().index(indexRequest, new ActionListener<>() { + @Override + public void onResponse(DocWriteResponse docWriteResponse) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Indexing request should have succeeded eventually, failed with " + e.getMessage()); + } + }); + } + + // End the disruption so that all pending tasks will complete + masterNodeDisruption.stopDisrupting(); + + // Wait for all the indexing requests to be processed successfully + countDownLatch.await(); + + // Verify that the rollover has happened once + dataStream = getDataStream(dataStreamName); + assertThat(dataStream.rolloverOnWrite(), equalTo(false)); + assertThat(dataStream.getBackingIndices().getIndices().size(), equalTo(2)); + } + + private DataStream getDataStream(String dataStreamName) { + return client().execute(GetDataStreamAction.INSTANCE, new GetDataStreamAction.Request(new String[] { dataStreamName })) + .actionGet() + 
.getDataStreams() + .get(0) + .getDataStream(); + } + + private void createDataStream(String dataStreamName) throws InterruptedException, ExecutionException { + final TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest = + new TransportPutComposableIndexTemplateAction.Request("my-template"); + putComposableTemplateRequest.indexTemplate( + ComposableIndexTemplate.builder() + .indexPatterns(List.of(dataStreamName)) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build() + ); + final AcknowledgedResponse putComposableTemplateResponse = client().execute( + TransportPutComposableIndexTemplateAction.TYPE, + putComposableTemplateRequest + ).actionGet(); + assertThat(putComposableTemplateResponse.isAcknowledged(), is(true)); + + final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); + final AcknowledgedResponse createDataStreamResponse = client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) + .get(); + assertThat(createDataStreamResponse.isAcknowledged(), is(true)); + } +} diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java index 698656dfa7406..c147677cf856c 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/SystemDataStreamSnapshotIT.java @@ -78,7 +78,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { } assertSuccessful( - clusterAdmin().prepareCreateSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIncludeGlobalState(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + 
.setIncludeGlobalState(true) + .execute() ); // We have to delete the data stream directly, as the feature reset API doesn't clean up system data streams yet @@ -98,7 +101,7 @@ public void testSystemDataStreamInGlobalState() throws Exception { // Make sure requesting the data stream by name throws. // For some reason, expectThrows() isn't working for me here, hence the try/catch. try { - clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setIndices(".test-data-stream") .setWaitForCompletion(true) .setRestoreGlobalState(randomBoolean()) // this shouldn't matter @@ -117,7 +120,7 @@ public void testSystemDataStreamInGlobalState() throws Exception { assertSystemDataStreamDoesNotExist(); // Now actually restore the data stream - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setRestoreGlobalState(true) .get(); @@ -132,7 +135,10 @@ public void testSystemDataStreamInGlobalState() throws Exception { // Attempting to restore again without specifying indices or global/feature states should work, as the wildcard should not be // resolved to system indices/data streams. 
- clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setRestoreGlobalState(false).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) + .setWaitForCompletion(true) + .setRestoreGlobalState(false) + .get(); assertEquals(restoreSnapshotResponse.getRestoreInfo().totalShards(), restoreSnapshotResponse.getRestoreInfo().successfulShards()); } @@ -182,7 +188,7 @@ public void testSystemDataStreamInFeatureState() throws Exception { } SnapshotInfo snapshotInfo = assertSuccessful( - clusterAdmin().prepareCreateSnapshot(REPO, SNAPSHOT) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setIndices("my-index") .setFeatureStates(SystemDataStreamTestPlugin.class.getSimpleName()) .setWaitForCompletion(true) @@ -207,7 +213,7 @@ public void testSystemDataStreamInFeatureState() throws Exception { assertThat(indicesRemaining.indices(), arrayWithSize(0)); } - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO, SNAPSHOT) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIndices("my-index") .setFeatureStates(SystemDataStreamTestPlugin.class.getSimpleName()) diff --git a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index d9ab689c05a5c..2fc728a4fae34 100644 --- a/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/modules/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -113,7 +113,11 @@ protected SecureSettings credentials() { @Override protected void createRepository(String 
repoName) { - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repoName + ) .setType("azure") .setSettings( Settings.builder() diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index b2df41c69eda7..4afa6f2a10b5c 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -71,7 +71,11 @@ protected SecureSettings credentials() { @Override protected void createRepository(final String repoName) { - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repoName + ) .setType("gcs") .setSettings( Settings.builder() diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index 88f0e01db3e6a..c97e26651d4ee 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -200,8 +200,8 @@ public void testAbortRequestStats() throws Exception { // Intentionally fail snapshot to trigger abortMultipartUpload requests 
shouldFailCompleteMultipartUploadRequest.set(true); final String snapshot = "snapshot"; - clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index).get(); - clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get(); final RepositoryStats repositoryStats = StreamSupport.stream( internalCluster().getInstances(RepositoriesService.class).spliterator(), @@ -242,12 +242,16 @@ public void testMetrics() throws Exception { assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); final String snapshot = "snapshot"; - assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)); + assertSuccessfulSnapshot( + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); assertAcked(client().admin().indices().prepareDelete(index)); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true) + ); ensureGreen(index); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get()); final Map aggregatedMetrics = new HashMap<>(); // Compare collected stats and metrics for each node and they should be the same @@ -389,7 +393,7 @@ public void testEnforcedCooldownPeriod() throws IOException { true ); - final SnapshotId fakeOldSnapshot = 
clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-old") + final SnapshotId fakeOldSnapshot = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-old") .setWaitForCompletion(true) .setIndices() .get() @@ -434,15 +438,15 @@ public void testEnforcedCooldownPeriod() throws IOException { final String newSnapshotName = "snapshot-new"; final long beforeThrottledSnapshot = repository.threadPool().relativeTimeInNanos(); - clusterAdmin().prepareCreateSnapshot(repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, newSnapshotName).setWaitForCompletion(true).setIndices().get(); assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledSnapshot, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); final long beforeThrottledDelete = repository.threadPool().relativeTimeInNanos(); - clusterAdmin().prepareDeleteSnapshot(repoName, newSnapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, newSnapshotName).get(); assertThat(repository.threadPool().relativeTimeInNanos() - beforeThrottledDelete, greaterThan(TEST_COOLDOWN_PERIOD.getNanos())); final long beforeFastDelete = repository.threadPool().relativeTimeInNanos(); - clusterAdmin().prepareDeleteSnapshot(repoName, fakeOldSnapshot.getName()).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, fakeOldSnapshot.getName()).get(); assertThat(repository.threadPool().relativeTimeInNanos() - beforeFastDelete, lessThan(TEST_COOLDOWN_PERIOD.getNanos())); } diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 5064910723ab6..2359176abf715 100644 --- 
a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -127,10 +127,11 @@ protected void createRepository(String repoName) { settings.put("storage_class", storageClass); } } - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository(repoName) - .setType("s3") - .setSettings(settings) - .get(); + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repoName + ).setType("s3").setSettings(settings).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 9a1d12fab0af5..e59b3e8f90620 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -244,7 +244,12 @@ public void sendResponse(RestResponse response) { } private void createRepository(final String name, final Settings repositorySettings) { - assertAcked(clusterAdmin().preparePutRepository(name).setType(S3Repository.TYPE).setVerify(false).setSettings(repositorySettings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) + .setType(S3Repository.TYPE) + .setVerify(false) + .setSettings(repositorySettings) + ); } /** diff --git a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java index 
80798d931e93f..335da9123ed5a 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/elasticsearch/repositories/url/URLSnapshotRestoreIT.java @@ -46,7 +46,7 @@ public void testUrlRepository() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType(FsRepository.TYPE) .setSettings( Settings.builder() @@ -69,7 +69,7 @@ public void testUrlRepository() throws Exception { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -79,7 +79,7 @@ public void testUrlRepository() throws Exception { SnapshotState state = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots() @@ -94,7 +94,7 @@ public void testUrlRepository() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("url-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "url-repo") .setType(URLRepository.TYPE) .setSettings( Settings.builder() @@ -105,7 +105,7 @@ public void testUrlRepository() throws Exception { logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("url-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "url-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -114,15 +114,18 @@ public void testUrlRepository() throws Exception { assertHitCount(client.prepareSearch("test-idx").setSize(0), 100); logger.info("--> 
list available shapshots"); - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); + GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "url-repo").get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> delete snapshot"); - AcknowledgedResponse deleteSnapshotResponse = client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get(); + AcknowledgedResponse deleteSnapshotResponse = client.admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .get(); assertAcked(deleteSnapshotResponse); logger.info("--> list available shapshot again, no snapshots should be returned"); - getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("url-repo").get(); + getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "url-repo").get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0)); } @@ -130,7 +133,7 @@ public void testUrlRepositoryPermitsShutdown() throws Exception { assertAcked( client().admin() .cluster() - .preparePutRepository("url-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "url-repo") .setType(URLRepository.TYPE) .setVerify(false) .setSettings(Settings.builder().put(URLRepository.URL_SETTING.getKey(), "http://localhost/")) diff --git a/muted-tests.yml b/muted-tests.yml index 8e9ed04038074..a17e95e9a5b3f 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -75,6 +75,8 @@ tests: - class: "org.elasticsearch.xpack.esql.action.AsyncEsqlQueryActionIT" issue: "https://github.com/elastic/elasticsearch/issues/109944" method: "testBasicAsyncExecution" +- class: "org.elasticsearch.xpack.security.authz.store.NativePrivilegeStoreCacheTests" + issue: "https://github.com/elastic/elasticsearch/issues/110015" # Examples: diff --git 
a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java index a6d2bdcf8a1d4..6726929bdc91c 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java @@ -39,7 +39,7 @@ protected SecureSettings credentials() { @Override protected void createRepository(String repoName) { assertAcked( - clusterAdmin().preparePutRepository(repoName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("hdfs") .setSettings( Settings.builder() diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java index 0e2ec25b6cfaa..081c6c26319ab 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsTests.java @@ -44,7 +44,7 @@ public void testSimpleWorkflow() { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings( Settings.builder() @@ -75,7 +75,7 @@ public void testSimpleWorkflow() { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-3") .get(); @@ -86,7 +86,14 @@ public void testSimpleWorkflow() { ); assertThat( - 
client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), + client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap") + .get() + .getSnapshots() + .get(0) + .state(), equalTo(SnapshotState.SUCCESS) ); @@ -111,7 +118,7 @@ public void testSimpleWorkflow() { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -127,7 +134,7 @@ public void testSimpleWorkflow() { logger.info("--> restore one index after deletion"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") .get(); @@ -143,7 +150,10 @@ public void testSimpleWorkflow() { public void testMissingUri() { try { - clusterAdmin().preparePutRepository("test-repo").setType("hdfs").setSettings(Settings.EMPTY).get(); + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setType("hdfs") + .setSettings(Settings.EMPTY) + .get(); fail(); } catch (RepositoryException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); @@ -153,7 +163,7 @@ public void testMissingUri() { public void testEmptyUri() { try { - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "/path").build()) .get(); @@ -166,7 +176,7 @@ public void testEmptyUri() { public void testNonHdfsUri() { try { - 
clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "file:///").build()) .get(); @@ -179,7 +189,7 @@ public void testNonHdfsUri() { public void testPathSpecifiedInHdfs() { try { - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "hdfs:///some/path").build()) .get(); @@ -192,7 +202,7 @@ public void testPathSpecifiedInHdfs() { public void testMissingPath() { try { - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "hdfs:///").build()) .get(); @@ -207,7 +217,7 @@ public void testReplicationFactorBelowOne() { try { client().admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "hdfs:///").put("replication_factor", "0").put("path", "foo").build()) .get(); @@ -222,7 +232,7 @@ public void testReplicationFactorOverMaxShort() { try { client().admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings(Settings.builder().put("uri", "hdfs:///").put("replication_factor", "32768").put("path", "foo").build()) .get(); @@ -237,7 +247,7 @@ public void testReplicationFactorBelowReplicationMin() { try { client().admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings( Settings.builder() @@ -259,7 +269,7 @@ public void testReplicationFactorOverReplicationMax() { try { 
client().admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("hdfs") .setSettings( Settings.builder() diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 93dcd5a12d43d..9d330cd7e35eb 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -262,7 +262,7 @@ private static void createRepository(String repoName, boolean readOnly, boolean Request repoReq = new Request("PUT", "/_snapshot/" + repoName); repoReq.setJsonEntity( Strings.toString( - new PutRepositoryRequest().type("fs") + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).type("fs") .verify(verify) .settings(Settings.builder().put("location", repoName).put("readonly", readOnly).build()) ) diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java index b12a70ccb8425..4c6774988d7ae 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/snapshots/RestGetSnapshotsIT.java @@ -96,7 +96,7 @@ public void testSortOrder() throws Exception { private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) throws IOException { final boolean includeIndexNames = randomBoolean(); - final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName) + final List defaultSorting = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setOrder(order) 
.setIncludeIndexNames(includeIndexNames) .get() @@ -239,7 +239,7 @@ public void testFilterBySLMPolicy() throws Exception { final String repoName = "test-repo"; AbstractSnapshotIntegTestCase.createRepository(logger, repoName, "fs"); AbstractSnapshotIntegTestCase.createNSnapshots(logger, repoName, randomIntBetween(1, 5)); - final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots("*") + final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "*") .setSnapshots("*") .setSort(SnapshotSortKey.NAME) .get() @@ -248,7 +248,7 @@ public void testFilterBySLMPolicy() throws Exception { final String policyName = "some-policy"; final SnapshotInfo withPolicy = AbstractSnapshotIntegTestCase.assertSuccessful( logger, - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, policyName)) .setWaitForCompletion(true) .execute() @@ -268,7 +268,7 @@ public void testFilterBySLMPolicy() throws Exception { final String otherPolicyName = "other-policy"; final SnapshotInfo withOtherPolicy = AbstractSnapshotIntegTestCase.assertSuccessful( logger, - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithOtherPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithOtherPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, otherPolicyName)) .setWaitForCompletion(true) .execute() @@ -276,7 +276,7 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies("*"), is(List.of(withOtherPolicy, withPolicy))); assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName), is(List.of(withOtherPolicy, withPolicy))); assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); - final List allSnapshots = 
clusterAdmin().prepareGetSnapshots("*") + final List allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "*") .setSnapshots("*") .setSort(SnapshotSortKey.NAME) .get() @@ -293,7 +293,7 @@ public void testSortAfterStartTime() throws Exception { final SnapshotInfo snapshot2 = createFullSnapshotWithUniqueStartTime(repoName, "snapshot-2", startTimes); final SnapshotInfo snapshot3 = createFullSnapshotWithUniqueStartTime(repoName, "snapshot-3", startTimes); - final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .get() @@ -310,7 +310,7 @@ public void testSortAfterStartTime() throws Exception { assertThat(allAfterStartTimeAscending(startTime3), is(List.of(snapshot3))); assertThat(allAfterStartTimeAscending(startTime3 + 1), empty()); - final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) @@ -331,7 +331,7 @@ private SnapshotInfo createFullSnapshotWithUniqueStartTime(String repoName, Stri final SnapshotInfo snapshotInfo = AbstractSnapshotIntegTestCase.createFullSnapshot(logger, repoName, snapshotName); if (forbiddenStartTimes.contains(snapshotInfo.startTime())) { logger.info("--> snapshot start time collided"); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get()); } else { assertTrue(forbiddenStartTimes.add(snapshotInfo.startTime())); return snapshotInfo; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json index 1a7f944e88079..dec102a681c81 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/features.reset_features.json @@ -18,6 +18,12 @@ ] } ] + }, + "params":{ + "master_timeout":{ + "type":"time", + "description":"Explicit operation timeout for connection to master node" + } } } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml index 3d95712d30b30..9fc82eb125def 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/20_synthetic_source.yml @@ -70,6 +70,53 @@ object with unmapped fields: - match: { hits.hits.1._source.a.very.deeply.nested.field: BBBB } +--- +unmapped arrays: + - requires: + cluster_features: ["mapper.track_ignored_source"] + reason: requires tracking ignored source + + - do: + indices.create: + index: test + body: + settings: + index: + mapping: + total_fields: + ignore_dynamic_beyond_limit: true + limit: 1 + + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + + - do: + bulk: + index: test + refresh: true + body: + - '{ "create": { } }' + - '{ "name": "aaaa", "object_array": [ { "int_value": 10 }, { "int_value": 20 } ] }' + - '{ "create": { } }' + - '{ "name": "bbbb", "value_array": [ 100, 200, 300 ] }' + + - do: + search: + index: test + sort: name + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._source.name: aaaa } + - match: { hits.hits.0._source.object_array.0.int_value: 10 } + - match: { hits.hits.0._source.object_array.1.int_value: 20 } + - match: { hits.hits.1._source.name: bbbb } + - match: { hits.hits.1._source.value_array: 
[ 100, 200, 300] } + + --- nested object with unmapped fields: - requires: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 3aee1fdf505fe..47ed06ed4a905 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -32,7 +32,7 @@ public void testPutRepositoryWithBlocks() { try { setClusterReadOnly(true); assertBlocked( - clusterAdmin().preparePutRepository("test-repo-blocks") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", randomRepoPath())), @@ -44,7 +44,7 @@ public void testPutRepositoryWithBlocks() { logger.info("--> registering a repository is allowed when the cluster is not read only"); assertAcked( - clusterAdmin().preparePutRepository("test-repo-blocks") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", randomRepoPath())) @@ -53,7 +53,7 @@ public void testPutRepositoryWithBlocks() { public void testVerifyRepositoryWithBlocks() { assertAcked( - clusterAdmin().preparePutRepository("test-repo-blocks") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", randomRepoPath())) @@ -62,7 +62,11 @@ public void testVerifyRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. 
try { setClusterReadOnly(true); - VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository("test-repo-blocks").get(); + VerifyRepositoryResponse response = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo-blocks" + ).get(); assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); @@ -71,7 +75,7 @@ public void testVerifyRepositoryWithBlocks() { public void testDeleteRepositoryWithBlocks() { assertAcked( - clusterAdmin().preparePutRepository("test-repo-blocks") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", randomRepoPath())) @@ -80,18 +84,21 @@ public void testDeleteRepositoryWithBlocks() { logger.info("--> deleting a repository is blocked when the cluster is read only"); try { setClusterReadOnly(true); - assertBlocked(clusterAdmin().prepareDeleteRepository("test-repo-blocks"), Metadata.CLUSTER_READ_ONLY_BLOCK); + assertBlocked( + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks"), + Metadata.CLUSTER_READ_ONLY_BLOCK + ); } finally { setClusterReadOnly(false); } logger.info("--> deleting a repository is allowed when the cluster is not read only"); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo-blocks")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks")); } public void testGetRepositoryWithBlocks() { assertAcked( - clusterAdmin().preparePutRepository("test-repo-blocks") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-blocks") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", randomRepoPath())) @@ -100,7 +107,7 @@ public void testGetRepositoryWithBlocks() { // This test checks that the Get 
Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories("test-repo-blocks").get(); + GetRepositoriesResponse response = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-blocks").get(); assertThat(response.repositories(), hasSize(1)); } finally { setClusterReadOnly(false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index b6b0b2e54e691..eff71d0caf650 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -56,17 +56,21 @@ protected void setUpRepository() throws Exception { logger.info("--> register a repository"); assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPOSITORY_NAME) .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) ); logger.info("--> verify the repository"); - VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository(REPOSITORY_NAME).get(); + VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + REPOSITORY_NAME + ).get(); assertThat(verifyResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); logger.info("--> create a snapshot"); - CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + CreateSnapshotResponse snapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME) .setIncludeGlobalState(true) 
.setWaitForCompletion(true) .get(); @@ -79,7 +83,10 @@ public void testCreateSnapshotWithBlocks() { try { setClusterReadOnly(true); assertThat( - clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(), + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true) + .get() + .status(), equalTo(RestStatus.OK) ); } finally { @@ -87,7 +94,7 @@ public void testCreateSnapshotWithBlocks() { } logger.info("--> creating a snapshot is allowed when the cluster is not read only"); - CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-2") .setWaitForCompletion(true) .get(); assertThat(response.status(), equalTo(RestStatus.OK)); @@ -98,7 +105,7 @@ public void testCreateSnapshotWithIndexBlocks() { try { enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); assertThat( - clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-1") .setIndices(COMMON_INDEX_NAME_MASK) .setWaitForCompletion(true) .get() @@ -113,7 +120,7 @@ public void testCreateSnapshotWithIndexBlocks() { try { enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); assertThat( - clusterAdmin().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, "snapshot-2") .setIndices(COMMON_INDEX_NAME_MASK) .setWaitForCompletion(true) .get() @@ -129,7 +136,7 @@ public void testDeleteSnapshotWithBlocks() { logger.info("--> deleting a snapshot is allowed when the cluster is read only"); try { setClusterReadOnly(true); - assertTrue(clusterAdmin().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME).get().isAcknowledged()); + 
assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME).get().isAcknowledged()); } finally { setClusterReadOnly(false); } @@ -143,13 +150,16 @@ public void testRestoreSnapshotWithBlocks() { logger.info("--> restoring a snapshot is blocked when the cluster is read only"); try { setClusterReadOnly(true); - assertBlocked(clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), Metadata.CLUSTER_READ_ONLY_BLOCK); + assertBlocked( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME), + Metadata.CLUSTER_READ_ONLY_BLOCK + ); } finally { setClusterReadOnly(false); } logger.info("--> creating a snapshot is allowed when the cluster is not read only"); - RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME, SNAPSHOT_NAME) .setWaitForCompletion(true) .get(); assertThat(response.status(), equalTo(RestStatus.OK)); @@ -161,7 +171,7 @@ public void testGetSnapshotWithBlocks() { // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(REPOSITORY_NAME).get(); + GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME).get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).snapshotId().getName(), equalTo(SNAPSHOT_NAME)); } finally { @@ -173,7 +183,9 @@ public void testSnapshotStatusWithBlocks() { // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only. 
try { setClusterReadOnly(true); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(REPOSITORY_NAME).setSnapshots(SNAPSHOT_NAME).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, REPOSITORY_NAME) + .setSnapshots(SNAPSHOT_NAME) + .get(); assertThat(response.getSnapshots(), hasSize(1)); assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true)); } finally { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java index c024d7ffb9772..aaf663c8c5b24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/DesiredNodesSnapshotsIT.java @@ -43,7 +43,7 @@ public void testDesiredNodesAreNotIncludedInSnapshotsClusterState() { final var desiredNodesAfterSnapshot = getLatestDesiredNodes(); - clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName).setRestoreGlobalState(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName).setRestoreGlobalState(true).get(); final var desiredNodesAfterRestore = getLatestDesiredNodes(); assertThat(desiredNodesAfterRestore.historyID(), is(equalTo(desiredNodesAfterSnapshot.historyID()))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java index bb9324dd7d10c..8b551e00caeeb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/ShardRoutingRoleIT.java @@ -327,12 +327,18 @@ public void testShardCreation() throws Exception { // restoring the index from a snapshot may 
change the number of indexing replicas because the routing table is created afresh var repoPath = randomRepoPath(); assertAcked( - clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", repoPath)) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") + .setType("fs") + .setSettings(Settings.builder().put("location", repoPath)) ); assertEquals( SnapshotState.SUCCESS, - clusterAdmin().prepareCreateSnapshot("repo", "snap").setWaitForCompletion(true).get().getSnapshotInfo().state() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") + .setWaitForCompletion(true) + .get() + .getSnapshotInfo() + .state() ); if (randomBoolean()) { @@ -348,7 +354,7 @@ public void testShardCreation() throws Exception { assertEquals( 0, - clusterAdmin().prepareRestoreSnapshot("repo", "snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setIndices(INDEX_NAME) .setIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, routingTableWatcher.numReplicas)) .setWaitForCompletion(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 16be816b69bc4..a1a29468cc5bd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -106,7 +106,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti ensureStableCluster(3); assertAcked( - clusterAdmin().preparePutRepository("repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") .setType(FsRepository.TYPE) 
.setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -130,7 +130,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); var shardSizes = createReasonableSizedShards(indexName); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); @@ -145,7 +145,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti getTestFileStore(dataNodeName).setTotalSpace(shardSizes.getSmallestShardSize() + WATERMARK_BYTES - 1L); refreshDiskUsage(); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); @@ -179,7 +179,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard ensureStableCluster(3); assertAcked( - clusterAdmin().preparePutRepository("repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -203,7 +203,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard createIndex(indexName, indexSettings(6, 0).put(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), "0ms").build()); var shardSizes = createReasonableSizedShards(indexName); - final 
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repo", "snap") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); @@ -219,7 +219,7 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermarkWithMultipleShard getTestFileStore(dataNodeName).setTotalSpace(usableSpace + WATERMARK_BYTES); refreshDiskUsage(); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repo", "snap") + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setWaitForCompletion(true) .get(); final RestoreInfo restoreInfo = restoreSnapshotResponse.getRestoreInfo(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java index 8afcaccaf9e77..cab7b7df00fe9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java @@ -70,11 +70,14 @@ public void testIsGreenDuringSnapshotRestore() { var repositoryName = "repository"; var snapshotName = randomIdentifier(); assertAcked( - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) ); - clusterAdmin().prepareCreateSnapshot(repositoryName, 
snapshotName).setIndices(index).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setIndices(index) + .setWaitForCompletion(true) + .get(); if (randomBoolean()) { assertAcked(indicesAdmin().prepareDelete(index)); } else { @@ -83,7 +86,10 @@ public void testIsGreenDuringSnapshotRestore() { ensureGreen(); assertHealthDuring(equalTo(GREEN), () -> { - clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName).setIndices(index).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) + .setIndices(index) + .setWaitForCompletion(true) + .get(); ensureGreen(index); }); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 2bc6856479ab7..3202f5513e9ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -253,7 +253,13 @@ public void testRestoreSnapshotOverLimit() { repoSettings.put("compress", randomBoolean()); repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); - assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(repoSettings.build())); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setType("fs") + .setSettings(repoSettings.build()) + ); int dataNodes = clusterAdmin().prepareState().get().getState().getNodes().getDataNodes().size(); ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); @@ -270,7 +276,7 @@ public void testRestoreSnapshotOverLimit() { logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() 
.cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("snapshot-index") .get(); @@ -282,7 +288,7 @@ public void testRestoreSnapshotOverLimit() { List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots(); @@ -310,7 +316,7 @@ public void testRestoreSnapshotOverLimit() { try { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("snapshot-index") .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java index 526921fdc95ba..6a8806ca26526 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java @@ -104,7 +104,7 @@ public void clusterChanged(ClusterChangedEvent event) { logger.info("--> starting snapshot"); ActionFuture future = client(masterNode1).admin() .cluster() - .prepareCreateSnapshot("test-repo", snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot) .setWaitForCompletion(true) .setIndices(idxName) .execute(); @@ -163,7 +163,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { logger.info("--> starting snapshot"); ActionFuture future = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .execute(); @@ -193,7 +193,7 @@ public void testDisruptionAfterShardFinalization() throws 
Exception { blockMasterFromFinalizingSnapshotOnIndexFile(repoName); final ActionFuture snapshotFuture = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .execute(); waitForBlock(masterNode, repoName); @@ -203,14 +203,14 @@ public void testDisruptionAfterShardFinalization() throws Exception { logger.info("--> create a snapshot expected to be successful"); final CreateSnapshotResponse successfulSnapshot = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .get(); final SnapshotInfo successfulSnapshotInfo = successfulSnapshot.getSnapshotInfo(); assertThat(successfulSnapshotInfo.state(), is(SnapshotState.SUCCESS)); logger.info("--> making sure snapshot delete works out cleanly"); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "snapshot-2").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2").get()); } public void testMasterFailOverDuringShardSnapshots() throws Exception { @@ -230,7 +230,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { final ActionFuture snapshotResponse = internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(true) .execute(); @@ -256,7 +256,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { private void assertSnapshotExists(String repository, String snapshot) { GetSnapshotsResponse snapshotsStatusResponse = dataNodeClient().admin() .cluster() - .prepareGetSnapshots(repository) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(snapshot) .get(); SnapshotInfo snapshotInfo = 
snapshotsStatusResponse.getSnapshots().get(0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index 2fed0a45032a9..4bd8fadc93095 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -554,7 +554,7 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, // it snapshots and that will write a new segments.X+1 file logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setSettings( Settings.builder() @@ -564,10 +564,11 @@ public void testCorruptFileThenSnapshotAndRestore() throws InterruptedException, ) ); logger.info("--> snapshot"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setIndices("test") - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setIndices("test").get(); final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state(); logger.info("--> snapshot terminated with state " + snapshotState); final List files = listShardFiles(shardRouting); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index d1462ef8da3dc..267af6cc1fb01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -247,12 +247,13 @@ public void testSpecifiedIndexUnavailableSnapshotRestore() throws Exception { ensureGreen("test1"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "dummy-repo" + ).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - clusterAdmin().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", "snap1").setWaitForCompletion(true).get(); verify(snapshot("snap2", "test1", "test2"), true); verify(restore("snap1", "test1", "test2"), true); @@ -364,12 +365,13 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { ensureGreen("foobar"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "dummy-repo" + ).setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - clusterAdmin().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", "snap1").setWaitForCompletion(true).get(); IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false); 
verify(snapshot("snap2", "foo*", "bar*").setIndicesOptions(options), true); @@ -656,11 +658,13 @@ static GetSettingsRequestBuilder getSettings(String... indices) { } private static CreateSnapshotRequestBuilder snapshot(String name, String... indices) { - return clusterAdmin().prepareCreateSnapshot("dummy-repo", name).setWaitForCompletion(true).setIndices(indices); + return clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", name) + .setWaitForCompletion(true) + .setIndices(indices); } private static RestoreSnapshotRequestBuilder restore(String name, String... indices) { - return clusterAdmin().prepareRestoreSnapshot("dummy-repo", name) + return clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "dummy-repo", name) .setRenamePattern("(.+)") .setRenameReplacement("$1-copy-" + name) .setWaitForCompletion(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4f15b82ca1f16..676f8185ecb84 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -861,7 +861,7 @@ public void testSnapshotRecovery() throws Exception { indicesAdmin().prepareClose(INDEX_NAME).get(); logger.info("--> restore"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, SNAP_NAME) + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .get(); int totalShards = restoreSnapshotResponse.getRestoreInfo().totalShards(); @@ -1976,7 +1976,7 @@ private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, private void createRepository(boolean enableSnapshotPeerRecoveries) { assertAcked( - 
clusterAdmin().preparePutRepository(REPO_NAME) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO_NAME) .setType("fs") .setSettings( Settings.builder() @@ -1988,7 +1988,7 @@ private void createRepository(boolean enableSnapshotPeerRecoveries) { } private CreateSnapshotResponse createSnapshot(String indexName) { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -1999,7 +1999,7 @@ private CreateSnapshotResponse createSnapshot(String indexName) { ); assertThat( - clusterAdmin().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS) ); return createSnapshotResponse; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index 212cf7510d349..2f336f25c3cab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -243,7 +243,7 @@ public void testFailingReposAreTreatedAsNonExistingShardSnapshots() throws Excep ); assertAcked( - clusterAdmin().preparePutRepository(failingRepo.v1()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, failingRepo.v1()) .setType(FailingRepoPlugin.TYPE) .setVerify(false) .setSettings(Settings.builder().put(repoFailureType, true).put("location", failingRepo.v2())) @@ -290,7 +290,7 @@ private ShardId 
getShardIdForIndex(String indexName) { private void createRepository(String repositoryName, String type, Path location, boolean recoveryEnabledRepo) { assertAcked( - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(type) .setVerify(false) .setSettings( @@ -302,6 +302,9 @@ private void createRepository(String repositoryName, String type, Path location, } private void createSnapshot(String repoName, String snapshotName, String index) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(index).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(index) + .get(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java index ea2c221c8c4a4..6c7bcd17af1f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java @@ -145,7 +145,7 @@ public void testGetShardSnapshotReturnsTheLatestSuccessfulSnapshot() throws Exce if (useBwCFormat) { // Reload the RepositoryData so we don't use cached data that wasn't serialized - assertAcked(clusterAdmin().prepareDeleteRepository(repoName).get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get()); createRepository(repoName, "fs", repoPath); } @@ -176,10 +176,11 @@ public void testGetShardSnapshotWhileThereIsARunningSnapshot() throws Exception blockAllDataNodes(fsRepoName); final String snapshotName = "snap-1"; - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(fsRepoName, snapshotName) - .setIndices(indexName) - 
.setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + fsRepoName, + snapshotName + ).setIndices(indexName).setWaitForCompletion(true).execute(); waitForBlockOnAnyDataNode(fsRepoName); @@ -292,7 +293,7 @@ public void testFailedSnapshotsAreNotReturned() throws Exception { ((MockRepository) repositoriesService.repository(repoName)).setBlockAndFailOnWriteSnapFiles(); } - clusterAdmin().prepareCreateSnapshot(repoName, "snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap") .setIndices(indexName) .setWaitForCompletion(false) .setFeatureStates(NO_FEATURE_STATES_VALUE) @@ -341,9 +342,9 @@ private PlainActionFuture getLatestSnapshotForShardFut PlainActionFuture future = new PlainActionFuture<>(); final GetShardSnapshotRequest request; if (useAllRepositoriesRequest && randomBoolean()) { - request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + request = GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { - request = GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, repositories); + request = GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, repositories); } client().execute(TransportGetShardSnapshotAction.TYPE, request, future); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java index f931eb717457d..d2f567567957c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java @@ -106,7 +106,7 @@ public void testCreateInvalidRepository() throws Exception { // verification should fail with some node has InvalidRepository final var expectedException = expectThrows( 
RepositoryVerificationException.class, - clusterAdmin().prepareVerifyRepository(repositoryName) + clusterAdmin().prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) ); for (Throwable suppressed : expectedException.getSuppressed()) { Throwable outerCause = suppressed.getCause(); @@ -130,16 +130,27 @@ public void testCreateInvalidRepository() throws Exception { // put repository again: let all node can create repository successfully createRepository(repositoryName, UnstableRepository.TYPE, Settings.builder().put("location", randomRepoPath())); // verification should succeed with all node create repository successfully - VerifyRepositoryResponse verifyRepositoryResponse = clusterAdmin().prepareVerifyRepository(repositoryName).get(); + VerifyRepositoryResponse verifyRepositoryResponse = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + repositoryName + ).get(); assertEquals(verifyRepositoryResponse.getNodes().size(), internalCluster().numDataAndMasterNodes()); } private void createRepository(String name, String type, Settings.Builder settings) { // create - assertAcked(clusterAdmin().preparePutRepository(name).setType(type).setVerify(false).setSettings(settings).get()); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) + .setType(type) + .setVerify(false) + .setSettings(settings) + .get() + ); // get - final GetRepositoriesResponse updatedGetRepositoriesResponse = clusterAdmin().prepareGetRepositories(name).get(); + final GetRepositoriesResponse updatedGetRepositoriesResponse = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, name) + .get(); // assert assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); final RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java index 76f3ca328d222..1b536aa9be982 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/RepositoriesServiceIT.java @@ -46,11 +46,17 @@ public void testUpdateRepository() { final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(FsRepository.TYPE) + .setSettings(repoSettings) + ); final GetRepositoriesResponse originalGetRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(repositoryName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) .get(); assertThat(originalGetRepositoriesResponse.repositories(), hasSize(1)); @@ -64,11 +70,17 @@ public void testUpdateRepository() { final boolean updated = randomBoolean(); final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; - assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + ); final GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(repositoryName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) .get(); assertThat(updatedGetRepositoriesResponse.repositories(), hasSize(1)); @@ -82,6 +94,12 @@ public void testUpdateRepository() { // check that a noop update does not verify. 
Since the new data node does not share the same `path.repo`, verification will fail if // it runs. internalCluster().startDataOnlyNode(Settings.builder().put(Environment.PATH_REPO_SETTING.getKey(), createTempDir()).build()); - assertAcked(client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .setType(updatedRepositoryType) + .setSettings(repoSettings) + ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index bf937a9d57f02..01b01fdf5fcde 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -59,7 +59,10 @@ public void testRepeatCleanupsDontRemove() throws Exception { final ActionFuture cleanupFuture = startBlockedCleanup("test-repo"); logger.info("--> sending another cleanup"); - assertFutureThrows(clusterAdmin().prepareCleanupRepository("test-repo").execute(), IllegalStateException.class); + assertFutureThrows( + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").execute(), + IllegalStateException.class + ); logger.info("--> ensure cleanup is still in progress"); final RepositoryCleanupInProgress cleanup = clusterAdmin().prepareState().get().getState().custom(RepositoryCleanupInProgress.TYPE); @@ -85,7 +88,7 @@ private ActionFuture startBlockedCleanup(String repoN createRepository(repoName, "mock"); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot(repoName, "test-snap").setWaitForCompletion(true).get(); + 
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap").setWaitForCompletion(true).get(); final BlobStoreRepository repository = getRepositoryOnMaster(repoName); @@ -111,7 +114,7 @@ private ActionFuture startBlockedCleanup(String repoN final ActionFuture future = internalCluster().nonMasterClient() .admin() .cluster() - .prepareCleanupRepository(repoName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .execute(); final String masterNode = internalCluster().getMasterName(); @@ -128,9 +131,11 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti logger.info("--> create three snapshots"); for (int i = 0; i < 3; ++i) { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, "test-snap-" + i) - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "test-snap-" + i + ).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); } @@ -158,7 +163,7 @@ public void testCleanupOldIndexN() throws ExecutionException, InterruptedExcepti } logger.info("--> cleanup repository"); - clusterAdmin().prepareCleanupRepository(repoName).get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(); BlobStoreTestUtil.assertConsistency(repository); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index 6c8f4c04e2a75..a9f8e0563c1ff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -65,15 +65,15 @@ public void testSnapshotOperationPurposes() throws Exception { } final var timeout = TimeValue.timeValueSeconds(10); - clusterAdmin().prepareCleanupRepository(repoName).get(timeout); - clusterAdmin().prepareCloneSnapshot(repoName, "snap-0", "clone-0").setIndices("index-0").get(timeout); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(timeout); + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-0", "clone-0").setIndices("index-0").get(timeout); // restart to ensure that the reads which happen when starting a node on a nonempty repository use the expected purposes internalCluster().fullRestart(); - clusterAdmin().prepareGetSnapshots(repoName).get(timeout); + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get(timeout); - clusterAdmin().prepareRestoreSnapshot(repoName, "clone-0") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "clone-0") .setRenamePattern("index-0") .setRenameReplacement("restored-0") .setWaitForCompletion(true) @@ -83,7 +83,7 @@ public void testSnapshotOperationPurposes() throws Exception { assertTrue(startDeleteSnapshot(repoName, "snap-" + i).get(10, TimeUnit.SECONDS).isAcknowledged()); } - clusterAdmin().prepareDeleteRepository(repoName).get(timeout); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(timeout); } public static class TestPlugin extends Plugin implements RepositoryPlugin { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java index fa5d8d93c9e45..1ca2526b53dff 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java @@ -137,7 +137,7 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo final var reposResponse = client().execute( GetRepositoriesAction.INSTANCE, - new GetRepositoriesRequest(new String[] { "repo", "repo1" }) + new GetRepositoriesRequest(TEST_REQUEST_TIMEOUT, new String[] { "repo", "repo1" }) ).get(); assertThat( @@ -204,7 +204,10 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic "[err-repo] missing", expectThrows( RepositoryMissingException.class, - client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" })) + client().execute( + GetRepositoriesAction.INSTANCE, + new GetRepositoriesRequest(TEST_REQUEST_TIMEOUT, new String[] { "err-repo" }) + ) ).getMessage() ); @@ -239,7 +242,7 @@ private PutRepositoryRequest sampleRestRequest(String name) throws Exception { var bis = new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)); var parser = JSON.xContent().createParser(XContentParserConfiguration.EMPTY, bis) ) { - return new PutRepositoryRequest(name).source(parser.map()); + return new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name).source(parser.map()); } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java index dfd753d02db67..049260e14100f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/SnapshotsAndFileSettingsIT.java @@ -173,7 +173,10 @@ public void 
testRestoreWithRemovedFileSettings() throws Exception { Files.delete(fs.watchedFile()); logger.info("--> restore global state from the snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); ensureGreen(); @@ -285,7 +288,10 @@ public void testRestoreWithPersistedFileSettings() throws Exception { logger.info("--> restore global state from the snapshot"); var removedReservedState = removedReservedClusterStateListener(masterNode); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java index 2a8db5f317cfe..d3dba66055e01 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedRestoreIT.java @@ -53,10 +53,11 @@ public void testAbortedRestoreAlsoAbortFileRestores() throws Exception { failReadsAllDataNodes(repositoryName); logger.info("--> starting restore"); - final ActionFuture future = clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName) - .setWaitForCompletion(true) - .setIndices(indexName) - .execute(); + final ActionFuture future = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repositoryName, + snapshotName + ).setWaitForCompletion(true).setIndices(indexName).execute(); assertBusy(() -> { final RecoveryResponse recoveries = 
indicesAdmin().prepareRecoveries(indexName) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java index bd14f913b10ef..86a4d728df787 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/AbortedSnapshotIT.java @@ -60,7 +60,10 @@ public void run() { snapshotExecutor.execute(new BlockingTask()); safeAwait(barrier); // wait for snapshot thread to be blocked - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-1").setWaitForCompletion(false).setPartial(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1") + .setWaitForCompletion(false) + .setPartial(true) + .get(); // resulting cluster state has been applied on all nodes, which means the first task for the SNAPSHOT pool is queued up final var snapshot = SnapshotsInProgress.get(clusterService.state()).forRepo(repoName).get(0).snapshot(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java index df4d52727384f..ed0e226fc377b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/BlobStoreIncrementalityIT.java @@ -112,7 +112,7 @@ public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedExcepti final String snapshot3 = "snap-3"; logger.info("--> creating snapshot 3"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot3).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot3).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> Shutting down 
new primary node [{}]", newPrimary); stopNode(newPrimary); @@ -120,7 +120,7 @@ public void testIncrementalBehaviorOnPrimaryFailover() throws InterruptedExcepti final String snapshot4 = "snap-4"; logger.info("--> creating snapshot 4"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot4).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot4).setIndices(indexName).setWaitForCompletion(true).get(); assertTwoIdenticalShardSnapshots(repo, indexName, snapshot3, snapshot4); @@ -156,7 +156,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { createRepository(repo, "fs"); logger.info("--> creating snapshot 1"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> force merging down to a single segment"); final BroadcastResponse forceMergeResponse = indicesAdmin().prepareForceMerge(indexName).setMaxNumSegments(1).setFlush(true).get(); @@ -164,7 +164,7 @@ public void testForceMergeCausesFullSnapshot() throws Exception { final String snapshot2 = "snap-2"; logger.info("--> creating snapshot 2"); - clusterAdmin().prepareCreateSnapshot(repo, snapshot2).setIndices(indexName).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot2).setIndices(indexName).setWaitForCompletion(true).get(); logger.info("--> asserting that the two snapshots refer to different files in the repository"); final SnapshotStats secondSnapshotShardStatus = getStats(repo, snapshot2).getIndices().get(indexName).getShards().get(0).getStats(); @@ -220,7 +220,7 @@ public void testRecordCorrectSegmentCountsWithBackgroundMerges() throws Exceptio }, 30L, TimeUnit.SECONDS); final SnapshotInfo after = createFullSnapshot(repoName, "snapshot-after"); - final int 
incrementalFileCount = clusterAdmin().prepareSnapshotStatus() + final int incrementalFileCount = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) .setRepository(repoName) .setSnapshots(after.snapshotId().getName()) .get() @@ -252,12 +252,12 @@ private void assertTwoIdenticalShardSnapshots(String repo, String indexName, Str } private SnapshotStatus getStats(String repository, String snapshot) { - return clusterAdmin().prepareSnapshotStatus(repository).setSnapshots(snapshot).get().getSnapshots().get(0); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository).setSnapshots(snapshot).get().getSnapshots().get(0); } private void ensureRestoreSingleShardSuccessfully(String repo, String indexName, String snapshot, String indexSuffix) { logger.info("--> restoring [{}]", snapshot); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setIndices(indexName) .setRenamePattern("(.+)") .setRenameReplacement("$1" + indexSuffix) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index ca06dcea88766..a16a19f66085b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -133,7 +133,7 @@ public void testCloneSnapshotIndex() throws Exception { final String targetSnapshot = "target-snapshot"; assertAcked(startClone(repoName, sourceSnapshot, targetSnapshot, indexName).get()); - final List status = clusterAdmin().prepareSnapshotStatus(repoName) + final List status = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(sourceSnapshot, targetSnapshot) .get() 
.getSnapshots(); @@ -171,7 +171,7 @@ public void testClonePreventsSnapshotDelete() throws Exception { unblockNode(repoName, masterName); assertAcked(cloneFuture.get()); - final List status = clusterAdmin().prepareSnapshotStatus(repoName) + final List status = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(sourceSnapshot, targetSnapshot) .get() .getSnapshots(); @@ -226,7 +226,10 @@ public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { createIndexWithRandomDocs(indexFast, randomIntBetween(20, 100)); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); assertThat(cloneFuture.isDone(), is(false)); @@ -250,10 +253,11 @@ public void testLongRunningSnapshotAllowsConcurrentClone() throws Exception { createIndexWithRandomDocs(indexFast, randomIntBetween(20, 100)); blockDataNode(repoName, dataNode); - final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot") - .setIndices(indexFast) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "fast-snapshot" + ).setIndices(indexFast).setWaitForCompletion(true).execute(); waitForBlock(dataNode, repoName); final String targetSnapshot = "target-snapshot"; @@ -467,7 +471,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final Client masterClient = internalCluster().masterClient(); final ActionFuture sourceSnapshotFuture = masterClient.admin() .cluster() - .prepareCreateSnapshot(repoName, sourceSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); @@ -500,7 
+504,7 @@ public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Except final Client masterClient = internalCluster().masterClient(); final ActionFuture sourceSnapshotFuture = masterClient.admin() .cluster() - .prepareCreateSnapshot(repoName, sourceSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); @@ -802,13 +806,13 @@ public void testCloneAfterFailedShardSnapshot() throws Exception { blockDataNode(repoName, dataNode); final ActionFuture snapshotFuture = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "full-snapshot") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "full-snapshot") .execute(); awaitNumberOfSnapshotsInProgress(1); waitForBlock(dataNode, repoName); final ActionFuture cloneFuture = client(masterNode).admin() .cluster() - .prepareCloneSnapshot(repoName, sourceSnapshot, "target-snapshot") + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot, "target-snapshot") .setIndices(testIndex) .execute(); awaitNumberOfSnapshotsInProgress(2); @@ -842,7 +846,11 @@ private static ActionFuture startClone( String targetSnapshot, String... 
indices ) { - return client.admin().cluster().prepareCloneSnapshot(repoName, sourceSnapshot, targetSnapshot).setIndices(indices).execute(); + return client.admin() + .cluster() + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, sourceSnapshot, targetSnapshot) + .setIndices(indices) + .execute(); } private void blockMasterOnReadIndexMeta(String repoName) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index 1152cf5f03e5a..e6b2b86d0dbeb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -115,7 +115,10 @@ public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception { createIndexWithContent(indexFast, dataNode2, dataNode); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); assertThat(createSlowFuture.isDone(), is(false)); @@ -140,7 +143,10 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti final String indexFast = "index-fast"; createIndexWithContent(indexFast, fastDataNode, slowDataNode); assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot").setIndices(indexFast).setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot") + .setIndices(indexFast) + .setWaitForCompletion(true) + .execute() ); logger.info("--> corrupting the repository by moving index-N blob to next generation"); @@ -153,7 +159,9 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws 
Excepti logger.info("--> trying to create another snapshot in order for repository to be marked as corrupt"); final SnapshotException snapshotException = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2").setIndices(indexFast).setWaitForCompletion(true) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "fast-snapshot2") + .setIndices(indexFast) + .setWaitForCompletion(true) ); assertThat(snapshotException.getMessage(), containsString("failed to update snapshot in repository")); assertEquals(RepositoryData.CORRUPTED_REPO_GEN, getRepositoryMetadata(repoName).generation()); @@ -206,7 +214,7 @@ public void testDeletesAreBatched() throws Exception { } snapshotNames.removeAll(toDelete); final ListenableFuture future = new ListenableFuture<>(); - clusterAdmin().prepareDeleteSnapshot(repoName, toDelete.toArray(Strings.EMPTY_ARRAY)).execute(future); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, toDelete.toArray(Strings.EMPTY_ARRAY)).execute(future); deleteFutures.add(future); } @@ -246,7 +254,10 @@ public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { final ActionFuture createSlowFuture = startAndBlockFailingFullSnapshot(blockedRepoName, "blocked-snapshot"); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot").setIndices("does-not-exist-*").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot") + .setIndices("does-not-exist-*") + .setWaitForCompletion(false) + .get(); unblockNode(blockedRepoName, internalCluster().getMasterName()); expectThrows(SnapshotException.class, createSlowFuture); @@ -382,10 +393,11 @@ public void testAbortOneOfMultipleSnapshots() throws Exception { awaitNDeletionsInProgress(1); logger.info("--> start third snapshot"); - final ActionFuture thirdSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three") - .setIndices(secondIndex) - 
.setWaitForCompletion(true) - .execute(); + final ActionFuture thirdSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + "snapshot-three" + ).setIndices(secondIndex).setWaitForCompletion(true).execute(); assertThat(firstSnapshotResponse.isDone(), is(false)); assertThat(secondSnapshotResponse.isDone(), is(false)); @@ -402,7 +414,7 @@ public void testAbortOneOfMultipleSnapshots() throws Exception { logger.info("--> verify that the first snapshot is gone"); assertThat( - clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), containsInAnyOrder(secondSnapshotInfo, thirdSnapshotInfo) ); } @@ -468,7 +480,7 @@ public void testCascadedAborts() throws Exception { assertAcked(allDeletedResponse.get()); logger.info("--> verify that all snapshots are gone"); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testMasterFailOverWithQueuedDeletes() throws Exception { @@ -552,7 +564,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> verify that all snapshots are gone and no more work is left in the cluster state"); awaitNoMoreRunningOperations(); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { @@ -611,7 +623,7 @@ public void testQueuedDeletesWithFailures() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - 
assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testQueuedDeletesWithOverlap() throws Exception { @@ -638,7 +650,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); - assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); + assertThat(clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(), empty()); } public void testQueuedOperationsOnMasterRestart() throws Exception { @@ -651,7 +663,7 @@ public void testQueuedOperationsOnMasterRestart() throws Exception { startAndBlockOnDeleteSnapshot(repoName, "*"); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three").setWaitForCompletion(false).get(); startDeleteSnapshot(repoName, "*"); awaitNDeletionsInProgress(2); @@ -677,20 +689,20 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { blockNodeOnAnyFiles(repoName, masterNode); ActionFuture firstDeleteFuture = client(masterNode).admin() .cluster() - .prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); waitForBlock(masterNode, repoName); final ActionFuture createThirdSnapshot = client(masterNode).admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-three") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three") .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); final ActionFuture secondDeleteFuture = client(masterNode).admin() .cluster() - 
.prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); awaitNDeletionsInProgress(2); @@ -733,7 +745,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except final ActionFuture deleteFuture = client(masterNode).admin() .cluster() - .prepareDeleteSnapshot(repoName, "*") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*") .execute(); awaitNDeletionsInProgress(1); @@ -767,7 +779,7 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception corruptIndexN(repoPath, generation); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-three").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-three").setWaitForCompletion(false).get(); final ActionFuture deleteFuture = startDeleteFromNonMasterClient(repoName, "*"); awaitNDeletionsInProgress(2); @@ -975,14 +987,16 @@ public void testQueuedSnapshotsWaitingForShardReady() throws Exception { logger.info("--> start two snapshots"); final String snapshotOne = "snap-1"; final String snapshotTwo = "snap-2"; - final ActionFuture snapOneResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotOne) - .setWaitForCompletion(false) - .setIndices(testIndex) - .execute(); - final ActionFuture snapTwoResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotTwo) - .setWaitForCompletion(false) - .setIndices(testIndex) - .execute(); + final ActionFuture snapOneResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotOne + ).setWaitForCompletion(false).setIndices(testIndex).execute(); + final ActionFuture snapTwoResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotTwo + ).setWaitForCompletion(false).setIndices(testIndex).execute(); snapOneResponse.get(); snapTwoResponse.get(); @@ -1071,7 +1085,7 @@ public void testEquivalentDeletesAreDeduplicated() throws 
Exception { final int deletes = randomIntBetween(2, 10); final List> deleteResponses = new ArrayList<>(deletes); for (int i = 0; i < deletes; ++i) { - deleteResponses.add(clusterAdmin().prepareDeleteSnapshot(repoName, "*").execute()); + deleteResponses.add(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*").execute()); } waitForBlock(masterName, repoName); awaitNDeletionsInProgress(1); @@ -1273,10 +1287,14 @@ public void testMasterFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throw blockMasterFromFinalizingSnapshotOnIndexFile(repoName); blockMasterFromFinalizingSnapshotOnIndexFile(otherRepoName); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-blocked-1").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(repoName, "snapshot-blocked-2").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-1").setWaitForCompletion(false).get(); - clusterAdmin().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-2").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-blocked-1").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-blocked-2").setWaitForCompletion(false).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot-other-blocked-1") + .setWaitForCompletion(false) + .get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, otherRepoName, "snapshot-other-blocked-2") + .setWaitForCompletion(false) + .get(); awaitNumberOfSnapshotsInProgress(4); final String initialMaster = internalCluster().getMasterName(); @@ -1323,7 +1341,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail") + 
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "expected-to-fail") ); assertThat( cse.getMessage(), @@ -1337,7 +1355,7 @@ public void testConcurrentOperationsLimit() throws Exception { ); boolean deleteAndAbortAll = false; if (deleteFuture == null && randomBoolean()) { - deleteFuture = clusterAdmin().prepareDeleteSnapshot(repoName, "*").execute(); + deleteFuture = clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "*").execute(); deleteAndAbortAll = true; if (randomBoolean()) { awaitNDeletionsInProgress(1); @@ -1490,7 +1508,7 @@ public void testConcurrentRestoreDeleteAndClone() throws Exception { for (int i = 0; i < nbIndices; i++) { if (randomBoolean()) { restoreFutures.add( - clusterAdmin().prepareRestoreSnapshot(repository, "snapshot-" + i) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-" + i) .setIndices("index-" + i) .setRenamePattern("(.+)") .setRenameReplacement("$1-restored-" + i) @@ -1499,7 +1517,9 @@ public void testConcurrentRestoreDeleteAndClone() throws Exception { ); } else { cloneFutures.add( - clusterAdmin().prepareCloneSnapshot(repository, "snapshot-" + i, "clone-" + i).setIndices("index-" + i).execute() + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-" + i, "clone-" + i) + .setIndices("index-" + i) + .execute() ); } } @@ -1555,15 +1575,17 @@ public void testOutOfOrderFinalization() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(1); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - 
.setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); unblockAllDataNodes(repository); final SnapshotInfo sn1 = assertSuccessful(snapshot1); @@ -1571,7 +1593,11 @@ public void testOutOfOrderFinalization() throws Exception { assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1589,17 +1615,19 @@ public void testOutOfOrderAndConcurrentFinalization() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(1); blockMasterOnWriteIndexFile(repository); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1).setWaitForCompletion(true).execute(); awaitClusterState(state -> { final List snapshotsInProgress = SnapshotsInProgress.get(state).forRepo(repository); @@ -1616,7 +1644,11 @@ public void testOutOfOrderAndConcurrentFinalization() throws Exception { 
assertAcked(startDeleteSnapshot(repository, sn1.snapshot().getSnapshotId().getName()).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1638,26 +1670,33 @@ public void testOutOfOrderFinalizationWithConcurrentClone() throws Exception { blockNodeWithIndex(repository, index2); final String sn1 = "snapshot-1"; - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, sn1) + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, sn1) .setIndices(index1, index2) .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); final String targetSnapshot = "target-snapshot"; - final ActionFuture clone = clusterAdmin().prepareCloneSnapshot(repository, sourceSnapshot, targetSnapshot) - .setIndices(index1) - .execute(); + final ActionFuture clone = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + sourceSnapshot, + targetSnapshot + ).setIndices(index1).execute(); assertAcked(clone.get()); unblockAllDataNodes(repository); assertSuccessful(snapshot1); logger.info("--> deleting snapshots [{},{}] from repo [{}]", sn1, sourceSnapshot, repository); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository).setSnapshots(sn1, sourceSnapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository).setSnapshots(sn1, sourceSnapshot).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots(targetSnapshot).setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots(targetSnapshot) + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1681,6 +1720,7 @@ public void 
testOutOfOrderCloneFinalization() throws Exception { final String cloneTarget = "target-snapshot"; final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1688,10 +1728,11 @@ public void testOutOfOrderCloneFinalization() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); unblockNode(repository, master); @@ -1699,7 +1740,11 @@ public void testOutOfOrderCloneFinalization() throws Exception { assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1726,12 +1771,18 @@ public void testCorrectlyFinalizeOutOfOrderPartialFailures() throws Exception { waitForBlock(dataNode2, repository); unblockNode(repository, dataNode1); - assertAcked(clusterAdmin().prepareCloneSnapshot(repository, "snapshot-1", "target-1").setIndices(index1).get()); + assertAcked( + clusterAdmin().prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repository, "snapshot-1", "target-1").setIndices(index1).get() + ); unblockNode(repository, dataNode2); snapshotBlocked.get(); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("target-1").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("target-1") + .setRepository(repository) + .get() + 
.getSnapshots(), hasSize(1) ); @@ -1757,6 +1808,7 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { final String cloneTarget = "target-snapshot"; final ActionFuture cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1764,15 +1816,16 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1, index2).setWaitForCompletion(true).setPartial(true).execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); awaitNumberOfSnapshotsInProgress(2); assertFalse(snapshot3.isDone()); @@ -1784,7 +1837,11 @@ public void testIndexDeletedWhileSnapshotQueuedAfterClone() throws Exception { assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2", "snapshot-3") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(2) ); } @@ -1808,6 +1865,7 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep final String cloneTarget = "target-snapshot"; final ActionFuture 
cloneSnapshot = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget @@ -1815,21 +1873,23 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep awaitNumberOfSnapshotsInProgress(1); waitForBlock(master, repository); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1, index2).setWaitForCompletion(true).setPartial(true).execute(); + final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index2).setWaitForCompletion(true).execute(); assertSuccessful(snapshot2); awaitNumberOfSnapshotsInProgress(2); assertFalse(snapshot3.isDone()); final String cloneTarget2 = "target-snapshot-2"; final ActionFuture cloneSnapshot2 = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, sourceSnapshot, cloneTarget2 @@ -1844,7 +1904,11 @@ public void testIndexDeletedWhileSnapshotAndCloneQueuedAfterClone() throws Excep assertAcked(startDeleteSnapshot(repository, cloneTarget).get()); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2", "snapshot-3").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2", "snapshot-3") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(2) ); } @@ -1874,7 +1938,7 @@ public void testQueuedAfterFailedShardSnapshot() throws Exception { final SnapshotInfo failedSnapshot = snapshotFutureFailure.get().getSnapshotInfo(); 
assertEquals(SnapshotState.PARTIAL, failedSnapshot.state()); - final SnapshotsStatusResponse snapshotsStatusResponse1 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse1 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); @@ -1882,14 +1946,14 @@ public void testQueuedAfterFailedShardSnapshot() throws Exception { createFullSnapshot(repository, tmpSnapshot); assertAcked(startDeleteSnapshot(repository, tmpSnapshot).get()); - final SnapshotsStatusResponse snapshotsStatusResponse2 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse2 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse2); assertAcked(startDeleteSnapshot(repository, "successful-snapshot").get()); - final SnapshotsStatusResponse snapshotsStatusResponse3 = clusterAdmin().prepareSnapshotStatus(repository) + final SnapshotsStatusResponse snapshotsStatusResponse3 = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repository) .setSnapshots(fullSnapshot) .get(); assertEquals(snapshotsStatusResponse1, snapshotsStatusResponse3); @@ -1908,26 +1972,33 @@ public void testOutOfOrderFinalizationManySnapshots() throws Exception { blockNodeWithIndex(repository, index2); - final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-1") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); - final ActionFuture snapshot2 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-2") - .setIndices(index1, index2) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot1 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-1" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); + final ActionFuture snapshot2 = 
clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-2" + ).setIndices(index1, index2).setWaitForCompletion(true).execute(); awaitNumberOfSnapshotsInProgress(2); - final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot(repository, "snapshot-3") - .setIndices(index1) - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshot3 = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + "snapshot-3" + ).setIndices(index1).setWaitForCompletion(true).execute(); assertSuccessful(snapshot3); unblockAllDataNodes(repository); assertSuccessful(snapshot1); assertSuccessful(snapshot2); assertThat( - clusterAdmin().prepareSnapshotStatus().setSnapshots("snapshot-2").setRepository(repository).get().getSnapshots(), + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT) + .setSnapshots("snapshot-2") + .setRepository(repository) + .get() + .getSnapshots(), hasSize(1) ); } @@ -1950,14 +2021,18 @@ public void testCloneQueuedAfterMissingShard() throws Exception { internalCluster().stopNode(dataNodes.get(0)); blockMasterOnWriteIndexFile(repository); - final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot(repository, snapshotToDelete) - .execute(); + final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotToDelete + ).execute(); awaitNDeletionsInProgress(1); final ActionFuture snapshot1 = startFullSnapshot(repository, "snapshot-1", true); awaitNumberOfSnapshotsInProgress(1); final ActionFuture cloneFuture = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, cloneSource, "target-snapshot" @@ -2021,8 +2096,11 @@ public void testSnapshotAndCloneQueuedAfterMissingShard() throws Exception { internalCluster().stopNode(dataNodes.get(0)); blockMasterOnWriteIndexFile(repository); - final ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot(repository, snapshotToDelete) - .execute(); + final 
ActionFuture deleteFuture = clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotToDelete + ).execute(); awaitNDeletionsInProgress(1); final ActionFuture snapshot1 = startFullSnapshot(repository, "snapshot-1", true); @@ -2032,6 +2110,7 @@ public void testSnapshotAndCloneQueuedAfterMissingShard() throws Exception { awaitNumberOfSnapshotsInProgress(2); final ActionFuture cloneFuture = clusterAdmin().prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, repository, cloneSource, "target-snapshot" @@ -2106,7 +2185,7 @@ public void testDeleteIndexWithOutOfOrderFinalization() { final var snapshotCompleters = new HashMap(); for (final var blockingIndex : List.of("index-0", "index-1", "index-2")) { final var snapshotName = "snapshot-with-" + blockingIndex; - final var snapshotFuture = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + final var snapshotFuture = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setPartial(true) .setIndices(indexToDelete, blockingIndex) @@ -2171,14 +2250,21 @@ public void testDeleteIndexWithOutOfOrderFinalization() { } private static void assertSnapshotStatusCountOnRepo(String otherBlockedRepoName, int count) { - final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(otherBlockedRepoName).get(); + final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus( + TEST_REQUEST_TIMEOUT, + otherBlockedRepoName + ).get(); final List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertThat(snapshotStatuses, hasSize(count)); } private ActionFuture startDeleteFromNonMasterClient(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}] from non master client", snapshotName, repoName); - return internalCluster().nonMasterClient().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return 
internalCluster().nonMasterClient() + .admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .execute(); } private ActionFuture startFullSnapshotFromNonMasterClient(String repoName, String snapshotName) { @@ -2186,7 +2272,7 @@ private ActionFuture startFullSnapshotFromNonMasterClien return internalCluster().nonMasterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2196,7 +2282,7 @@ private ActionFuture startFullSnapshotFromDataNode(Strin return internalCluster().dataNodeClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2206,7 +2292,7 @@ private ActionFuture startFullSnapshotFromMasterClient(S return internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(); } @@ -2241,7 +2327,10 @@ private void corruptIndexN(Path repoPath, long generation) throws IOException { } private static List currentSnapshots(String repoName) { - return clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT).get().getSnapshots(); + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) + .get() + .getSnapshots(); } private ActionFuture startAndBlockOnDeleteSnapshot(String repoName, String snapshotName) throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 9eb9041aa51f1..01a18a58f663c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -70,7 +70,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { final String snapshot = "test-snap"; logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshot) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-1") .get(); @@ -87,12 +87,17 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { assertRepositoryBlocked(repoName, snapshot); logger.info("--> recreate repository with same settings in order to reset corrupted state"); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).setType("fs").setSettings(settings) + ); startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).addSnapshots(snapshot) + ); } public void testConcurrentlyChangeRepositoryContents() throws Exception { @@ -118,7 +123,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -143,13 +148,13 @@ 
public void testConcurrentlyChangeRepositoryContents() throws Exception { } logger.info("--> remove repository"); - assertAcked(client.admin().cluster().prepareDeleteRepository(repoName)); + assertAcked(client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); logger.info("--> recreate repository"); assertAcked( client.admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings( Settings.builder() @@ -162,7 +167,10 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).addSnapshots(snapshot) + ); } public void testFindDanglingLatestGeneration() throws Exception { @@ -184,7 +192,7 @@ public void testFindDanglingLatestGeneration() throws Exception { final String snapshot = "test-snap"; logger.info("--> creating snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshot) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -229,7 +237,10 @@ public void testFindDanglingLatestGeneration() throws Exception { assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2)); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, 
repoName).addSnapshots(snapshot) + ); } public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { @@ -252,10 +263,11 @@ public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { for (int i = 0; i < snapshots; ++i) { // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't create any new version shard // generations (the existence of which would short-circuit checks for the repo containing old version snapshots) - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotPrefix + i) - .setIndices() - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotPrefix + i + ).setIndices().setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -354,7 +366,7 @@ public void testMountCorruptedRepositoryData() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -373,7 +385,7 @@ public void testMountCorruptedRepositoryData() throws Exception { final String otherRepoName = "other-repo"; assertAcked( - clusterAdmin().preparePutRepository(otherRepoName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, otherRepoName) .setType("fs") .setVerify(false) // don't try and load the repo data, since it is corrupt .setSettings(Settings.builder().put("location", repo).put("compress", false)) @@ -452,7 +464,7 @@ public void testRepairBrokenShardGenerations() throws Exception { ); logger.info("--> recreating repository to clear caches"); - 
clusterAdmin().prepareDeleteRepository(repoName).get(); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).get(); createRepository(repoName, "fs", repoPath); createFullSnapshot(repoName, "snapshot-2"); @@ -501,7 +513,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { } logger.info("--> verifying snapshot state for [{}]", snapshot1); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshot1)); @@ -510,7 +522,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { assertAcked(indicesAdmin().prepareDelete(indexName)); logger.info("--> restoring snapshot [{}]", snapshot1); - clusterAdmin().prepareRestoreSnapshot("test-repo", snapshot1) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot1) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) .get(); @@ -526,7 +538,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { final String snapshot2 = "test-snap-2"; logger.info("--> creating snapshot [{}]", snapshot2); - final SnapshotInfo snapshotInfo2 = clusterAdmin().prepareCreateSnapshot("test-repo", snapshot2) + final SnapshotInfo snapshotInfo2 = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", snapshot2) .setWaitForCompletion(true) .get() .getSnapshotInfo(); @@ -550,7 +562,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices(indices) .get(); @@ -576,7 +588,10 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); for (String index : indices) { assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); @@ -596,7 +611,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -613,7 +628,10 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); } public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { @@ -636,7 +654,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { logger.info("--> creating snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", 
"test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -654,12 +672,15 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-1") + ); logger.info("--> make sure that we can create the snapshot again"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIndices("test-idx-*") .get(); @@ -702,18 +723,24 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { } } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap") + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows(SnapshotMissingException.class, 
clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap")); - ActionRequestBuilder builder = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap") + ); + ActionRequestBuilder builder = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("test-snap"); expectThrows(SnapshotMissingException.class, builder); createFullSnapshot("test-repo", "test-snap"); @@ -730,7 +757,10 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { indexRandom(true, prepareIndex("test-idx-1").setSource("foo", "bar"), prepareIndex("test-idx-2").setSource("foo", "bar")); logger.info("--> creating snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") + .setWaitForCompletion(true) + .setIndices("test-idx-*") + .get(); logger.info("--> deleting shard level index file"); final Path indicesPath = repo.resolve("indices"); @@ -745,10 +775,11 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { } logger.info("--> creating another snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") - .setWaitForCompletion(true) - .setIndices("test-idx-1") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ).setWaitForCompletion(true).setIndices("test-idx-1").get(); assertEquals( createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards() - 1 @@ -759,9 +790,11 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { + "because it 
uses snap-*.data files and not the index-N to determine what files to restore" ); indicesAdmin().prepareDelete("test-idx-1", "test-idx-2").get(); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-1" + ).setWaitForCompletion(true).get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); } @@ -784,14 +817,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repo, existingSnapshot) ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, existingSnapshot) ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index ef8ae3cf1cffb..041d722591391 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -52,22 +52,22 @@ public void testShouldNotRestoreRepositoryMetadata() { assertThat(getSnapshot("test-repo-1", "test-snap").state(), 
equalTo(SnapshotState.SUCCESS)); logger.info("delete repository"); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo-1")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1")); logger.info("create another repository"); createRepository("test-repo-2", "fs", repoPath); logger.info("restore snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo-2", "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo-2", "test-snap") .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) .get(); logger.info("make sure old repository wasn't restored"); - ActionRequestBuilder builder = clusterAdmin().prepareGetRepositories("test-repo-1"); + ActionRequestBuilder builder = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-1"); expectThrows(RepositoryMissingException.class, builder); - assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); + assertThat(clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "test-repo-2").get().repositories().size(), equalTo(1)); } public void testShouldRestoreOnlySnapshotMetadata() throws Exception { @@ -100,7 +100,7 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { })); logger.info("restore snapshot"); - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snapshot") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") .setRestoreGlobalState(true) .setIndices("-*") .setWaitForCompletion(true) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 88c94985194fc..b2b3de51dd04b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -130,7 +130,10 @@ public void testSnapshotDuringNodeShutdown() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -168,7 +171,10 @@ public void testSnapshotWithStuckNode() throws Exception { assertFileCount(repo, 0); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -178,7 +184,7 @@ public void testSnapshotWithStuckNode() throws Exception { ActionFuture deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)) .admin() .cluster() - .prepareDeleteSnapshot("test-repo", "test-snap") + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .execute(); // Make sure that abort makes some progress Thread.sleep(100); @@ -194,10 +200,13 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); logger.info("--> trigger repository 
cleanup"); - clusterAdmin().prepareCleanupRepository("test-repo").get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").get(); // Expect two or three files to remain in the repository: // (1) index-latest @@ -266,7 +275,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> start snapshot with default settings without a closed index - should fail"); final SnapshotException sne = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) ); @@ -274,13 +283,13 @@ public void testRestoreIndexWithMissingShards() throws Exception { if (randomBoolean()) { logger.info("checking snapshot completion using status"); - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(false) .setPartial(true) .get(); assertBusy(() -> { - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap-2") .get(); List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); @@ -288,7 +297,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); assertTrue(snapshotStatuses.get(0).getState().completed()); }, 1, TimeUnit.MINUTES); - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = 
clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap-2") .get(); List snapshotStatuses = snapshotsStatusResponse.getSnapshots(); @@ -308,7 +317,11 @@ public void testRestoreIndexWithMissingShards() throws Exception { }, 1, TimeUnit.MINUTES); } else { logger.info("checking snapshot completion using wait_for_completion flag"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-2") + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ) .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) .setPartial(true) @@ -328,7 +341,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore incomplete snapshot - should fail"); assertFutureThrows( - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setWaitForCompletion(true) .execute(), @@ -336,11 +349,11 @@ public void testRestoreIndexWithMissingShards() throws Exception { ); logger.info("--> restore snapshot for the index that was snapshotted completely"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") - .setRestoreGlobalState(false) - .setIndices("test-idx-all") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-2" + ).setRestoreGlobalState(false).setIndices("test-idx-all").setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo(), notNullValue()); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6)); 
assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6)); @@ -349,7 +362,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the partial index"); cluster().wipeIndices("test-idx-some"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-some") .setPartial(true) @@ -363,7 +376,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully"); cluster().wipeIndices("test-idx-none"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-none") .setPartial(true) @@ -376,7 +389,7 @@ public void testRestoreIndexWithMissingShards() throws Exception { assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L))); logger.info("--> restore snapshot for the closed index that was snapshotted completely"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-2") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-2") .setRestoreGlobalState(false) .setIndices("test-idx-closed") .setWaitForCompletion(true) @@ -429,7 +442,7 @@ public boolean clearData(String nodeName) { logger.info("--> restore index snapshot"); assertThat( - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setRestoreGlobalState(false) .setWaitForCompletion(true) .get() @@ -460,14 +473,14 
@@ public void testRegistrationFailure() { internalCluster().startNode(nonMasterNode()); // Register mock repositories for (int i = 0; i < 5; i++) { - clusterAdmin().preparePutRepository("test-repo" + i) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo" + i) .setType("mock") .setSettings(Settings.builder().put("location", randomRepoPath())) .setVerify(false) .get(); } logger.info("--> make sure that properly setup repository can be registered on all nodes"); - clusterAdmin().preparePutRepository("test-repo-0") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-0") .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) .get(); @@ -557,7 +570,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception { dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -598,7 +611,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -663,7 +676,7 @@ public void testRestoreShrinkIndex() throws Exception { clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2"); logger.info("--> restore the shrunk index and ensure all shards are allocated"); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setWaitForCompletion(true) .setIndices(shrunkIdx) .get(); @@ -678,7 +691,7 @@ public void testSnapshotWithDateMath() { logger.info("--> creating repository"); 
assertAcked( - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -689,7 +702,7 @@ public void testSnapshotWithDateMath() { // snapshot could be taken before or after a day rollover final String expression2 = IndexNameExpressionResolver.resolveDateMathExpression(snapshotName); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo) + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) .setIgnoreUnavailable(true) .get(); @@ -715,7 +728,9 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { createRepository(repositoryName, "fs", repoPath); createFullSnapshot(repositoryName, snapshot0); - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot0).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) + .setSnapshots(snapshot0) + .get(); List snapshots = response.getSnapshots(); @@ -751,7 +766,7 @@ public void testSnapshotTotalAndIncrementalSizes() throws Exception { // drop 1st one to avoid miscalculation as snapshot reuses some files of prev snapshot assertAcked(startDeleteSnapshot(repositoryName, snapshot0).get()); - response = clusterAdmin().prepareSnapshotStatus(repositoryName).setSnapshots(snapshot1).get(); + response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName).setSnapshots(snapshot1).get(); final List snapshot1Files = scanSnapshotFolder(repoPath); final List snapshot1IndexMetaFiles = findRepoMetaBlobs(repoPath); @@ -829,7 +844,7 @@ public void testDeduplicateIndexMetadata() throws Exception { final List snapshot2IndexMetaFiles = 
findRepoMetaBlobs(repoPath); assertThat(snapshot2IndexMetaFiles, hasSize(2)); // should have created one new metadata blob - assertAcked(clusterAdmin().prepareDeleteSnapshot(repositoryName, snapshot0, snapshot1).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot0, snapshot1).get()); final List snapshot3IndexMetaFiles = findRepoMetaBlobs(repoPath); assertThat(snapshot3IndexMetaFiles, hasSize(1)); // should have deleted the metadata blob referenced by the first two snapshots } @@ -852,7 +867,7 @@ public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { setDisruptionScheme(disruption); client(internalCluster().getMasterName()).admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -869,7 +884,7 @@ public boolean validateClusterForming() { logger.info("--> wait for shard snapshots to show as failed"); assertBusy( () -> assertThat( - clusterAdmin().prepareSnapshotStatus("test-repo") + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots() @@ -886,7 +901,7 @@ public boolean validateClusterForming() { disruption.stopDisrupting(); // check that snapshot completes assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); @@ -912,7 +927,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { logger.info("--> snapshot"); client(internalCluster().getMasterName()).admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") 
.setWaitForCompletion(false) .setIndices("test-idx") .get(); @@ -922,7 +937,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { logger.info("--> wait for shard snapshot of first primary to show as failed"); assertBusy( () -> assertThat( - clusterAdmin().prepareSnapshotStatus("test-repo") + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots() @@ -940,7 +955,7 @@ public void testDataNodeRestartAfterShardSnapshotFailure() throws Exception { // check that snapshot completes with both failed shards being accounted for in the snapshot result assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .setIgnoreUnavailable(true) .get(); @@ -1000,7 +1015,7 @@ public void testRetentionLeasesClearedOnRestore() throws Exception { assertAcked(indicesAdmin().prepareClose(indexName)); logger.debug("--> restore index {} from snapshot", indexName); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .get(); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(shardCount)); @@ -1117,7 +1132,7 @@ public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { ); logger.info("--> snapshot"); - clusterAdmin().prepareCreateSnapshot(repoName, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(false) .setPartial(true) .setIndices(indexName) @@ -1184,21 +1199,21 @@ public void testDeleteIndexDuringSnapshot() throws Exception { } logger.info("--> restore snapshot 1"); - 
clusterAdmin().prepareRestoreSnapshot(repoName, firstSnapshotName).get(); + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, firstSnapshotName).get(); ensureGreen(indexName); } // create and delete a snapshot of the given name and for the given single index in a loop until the index is removed from the cluster // at which point doneListener is resolved private void startSnapshotDeleteLoop(String repoName, String indexName, String snapshotName, ActionListener doneListener) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setPartial(true) .setIndices(indexName) .execute(new ActionListener<>() { @Override public void onResponse(CreateSnapshotResponse createSnapshotResponse) { - clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionTestUtils.assertNoFailureListener(acknowledgedResponse -> { assertAcked(acknowledgedResponse); startSnapshotDeleteLoop(repoName, indexName, snapshotName, doneListener); @@ -1215,7 +1230,7 @@ public void onFailure(Exception e) { public void testGetReposWithWildcard() { internalCluster().startMasterOnlyNode(); - List repositoryMetadata = clusterAdmin().prepareGetRepositories("*").get().repositories(); + List repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, "*").get().repositories(); assertThat(repositoryMetadata, empty()); } @@ -1231,9 +1246,13 @@ public void testConcurrentSnapshotAndRepoDelete() throws Exception { // concurrently trigger repository and snapshot deletes final List> deleteFutures = new ArrayList<>(snapshotCount); - final ActionFuture deleteRepoFuture = clusterAdmin().prepareDeleteRepository(repoName).execute(); + final ActionFuture deleteRepoFuture = clusterAdmin().prepareDeleteRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + 
repoName + ).execute(); for (String snapshotName : snapshotNames) { - deleteFutures.add(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute()); + deleteFutures.add(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).execute()); } try { @@ -1278,7 +1297,7 @@ public void testDeleteSnapshotsOfDifferentIndexSets() throws IllegalAccessExcept createSnapshot(repoName, snapshot1, List.of(index1)); createSnapshot(repoName, snapshot2, List.of(index2)); - clusterAdmin().prepareDeleteSnapshot(repoName, snapshot1, snapshot2).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot1, snapshot2).get(); mockLog.assertAllExpectationsMatched(); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index c3dbfd03cae38..64c168ae73905 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -70,7 +70,10 @@ public void testResetSystemIndices() throws Exception { refresh("my_index"); // call the reset API - ResetFeatureStateResponse apiResponse = client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).get(); + ResetFeatureStateResponse apiResponse = client().execute( + ResetFeatureStateAction.INSTANCE, + new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT) + ).get(); assertThat( apiResponse.getFeatureStateResetStatuses(), containsInAnyOrder( @@ -105,7 +108,7 @@ public void testFeatureResetFailure() throws Exception { EvilSystemIndexTestPlugin.setBeEvil(true); ResetFeatureStateResponse resetFeatureStateResponse = client().execute( ResetFeatureStateAction.INSTANCE, - new ResetFeatureStateRequest() + new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT) ).get(); List failedFeatures = 
resetFeatureStateResponse.getFeatureStateResetStatuses() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index a04d1a5c8b02d..7c5f38fee02a9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -82,7 +82,10 @@ public void testSortBy() throws Exception { } private void doTestSortOrder(String repoName, Collection allSnapshotNames, SortOrder order) { - final List defaultSorting = clusterAdmin().prepareGetSnapshots(repoName).setOrder(order).get().getSnapshots(); + final List defaultSorting = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) + .setOrder(order) + .get() + .getSnapshots(); assertSnapshotListSorted(defaultSorting, null, order); final String[] repos = { repoName }; assertSnapshotListSorted(allSnapshotsSorted(allSnapshotNames, repos, SnapshotSortKey.NAME, order), SnapshotSortKey.NAME, order); @@ -187,7 +190,7 @@ public void testSortAndPaginateWithInProgress() throws Exception { assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.START_TIME); assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.NAME); assertStablePagination(repos, allSnapshotNames, SnapshotSortKey.INDICES); - final List currentSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List currentSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT) .get() .getSnapshots(); @@ -196,7 +199,7 @@ public void testSortAndPaginateWithInProgress() throws Exception { } assertThat( - clusterAdmin().prepareGetSnapshots(matchAllPattern()) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(GetSnapshotsRequest.CURRENT_SNAPSHOT, "-snap*") .get() .getSnapshots(), 
@@ -219,14 +222,14 @@ public void testPaginationRequiresVerboseListing() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); expectThrows( ActionRequestValidationException.class, - clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setVerbose(false) .setSort(SnapshotSortKey.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) ); expectThrows( ActionRequestValidationException.class, - clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setVerbose(false) .setSort(SnapshotSortKey.START_TIME) .setSize(randomIntBetween(1, 100)) @@ -293,14 +296,23 @@ public void testExcludePatterns() throws Exception { ); assertThat(allInOtherWithoutOtherExplicit, is(allInOther)); - assertThat(clusterAdmin().prepareGetSnapshots(matchAllPattern()).setSnapshots("other*", "-o*").get().getSnapshots(), empty()); - assertThat(clusterAdmin().prepareGetSnapshots("other*", "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), empty()); assertThat( - clusterAdmin().prepareGetSnapshots("other*", otherRepo, "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()).setSnapshots("other*", "-o*").get().getSnapshots(), + empty() + ); + assertThat( + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "other*", "-o*").setSnapshots(matchAllPattern()).get().getSnapshots(), + empty() + ); + assertThat( + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "other*", otherRepo, "-o*") + .setSnapshots(matchAllPattern()) + .get() + .getSnapshots(), empty() ); assertThat( - clusterAdmin().prepareGetSnapshots(matchAllPattern()) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("non-existing*", otherPrefixSnapshot1, "-o*") .get() .getSnapshots(), @@ -332,7 +344,7 @@ public void testNamesStartingInDash() { final SnapshotInfo 
weirdSnapshot1InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot1); final SnapshotInfo weirdSnapshot2InWeird2 = createFullSnapshot(weirdRepo2, weirdSnapshot2); - final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSort(SnapshotSortKey.REPOSITORY) .get() .getSnapshots(); @@ -395,14 +407,18 @@ public void testNamesStartingInDash() { } private List getAllByPatterns(String[] repos, String[] snapshots) { - return clusterAdmin().prepareGetSnapshots(repos).setSnapshots(snapshots).setSort(SnapshotSortKey.REPOSITORY).get().getSnapshots(); + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repos) + .setSnapshots(snapshots) + .setSort(SnapshotSortKey.REPOSITORY) + .get() + .getSnapshots(); } public void testFilterBySLMPolicy() throws Exception { final String repoName = "test-repo"; createRepository(repoName, "fs"); createNSnapshots(repoName, randomIntBetween(1, 5)); - final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List snapshotsWithoutPolicy = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.NAME) .get() @@ -410,7 +426,7 @@ public void testFilterBySLMPolicy() throws Exception { final String snapshotWithPolicy = "snapshot-with-policy"; final String policyName = "some-policy"; final SnapshotInfo withPolicy = assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, policyName)) .setWaitForCompletion(true) .execute() @@ -429,7 +445,7 @@ public void testFilterBySLMPolicy() throws Exception { final String snapshotWithOtherPolicy = "snapshot-with-other-policy"; final String otherPolicyName = "other-policy"; 
final SnapshotInfo withOtherPolicy = assertSuccessful( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotWithOtherPolicy) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotWithOtherPolicy) .setUserMetadata(Map.of(SnapshotsService.POLICY_ID_METADATA_FIELD, otherPolicyName)) .setWaitForCompletion(true) .execute() @@ -438,7 +454,7 @@ public void testFilterBySLMPolicy() throws Exception { assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName), is(List.of(withOtherPolicy, withPolicy))); assertThat(getAllSnapshotsForPolicies(policyName, otherPolicyName, "no-such-policy*"), is(List.of(withOtherPolicy, withPolicy))); - final List allSnapshots = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshots = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.NAME) .get() @@ -459,7 +475,7 @@ public void testSortAfter() throws Exception { final SnapshotInfo snapshot3 = createFullSnapshotWithUniqueTimestamps(repoName, "snapshot-3", startTimes, durations); createIndexWithContent("index-3"); - final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfo = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .get() @@ -486,7 +502,7 @@ public void testSortAfter() throws Exception { assertThat(allAfterNameAscending(name3), is(List.of(snapshot3))); assertThat(allAfterNameAscending("z"), empty()); - final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfoDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.START_TIME) .setOrder(SortOrder.DESC) @@ -507,7 +523,7 @@ public void testSortAfter() throws Exception { assertThat(allBeforeNameDescending(name1), 
is(List.of(snapshot1))); assertThat(allBeforeNameDescending("a"), empty()); - final List allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfoByDuration = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.DURATION) .get() @@ -523,7 +539,7 @@ public void testSortAfter() throws Exception { assertThat(allAfterDurationAscending(duration3), is(List.of(allSnapshotInfoByDuration.get(2)))); assertThat(allAfterDurationAscending(duration3 + 1), empty()); - final List allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final List allSnapshotInfoByDurationDesc = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setSort(SnapshotSortKey.DURATION) .setOrder(SortOrder.DESC) @@ -541,7 +557,7 @@ public void testSortAfter() throws Exception { assertThat(allSnapshots(new String[] { "snap*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(allSnapshotInfo)); assertThat(allSnapshots(new String[] { "o*" }, SnapshotSortKey.NAME, SortOrder.ASC, "a"), is(List.of(otherSnapshot))); - final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final GetSnapshotsResponse paginatedResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("snap*") .setSort(SnapshotSortKey.NAME) .setFromSortValue("a") @@ -550,7 +566,7 @@ public void testSortAfter() throws Exception { .get(); assertThat(paginatedResponse.getSnapshots(), is(List.of(snapshot2))); assertThat(paginatedResponse.totalCount(), is(3)); - final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(matchAllPattern()) + final GetSnapshotsResponse paginatedResponse2 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots("snap*") .setSort(SnapshotSortKey.NAME) 
.setFromSortValue("a") @@ -570,7 +586,7 @@ public void testRetrievingSnapshotsWhenRepositoryIsMissing() throws Exception { final List snapshotNames = createNSnapshots(repoName, randomIntBetween(1, 10)); snapshotNames.sort(String::compareTo); - final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(repoName, missingRepoName) + final GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName, missingRepoName) .setSort(SnapshotSortKey.NAME) .get(); assertThat(response.getSnapshots().stream().map(info -> info.snapshotId().getName()).toList(), equalTo(snapshotNames)); @@ -626,7 +642,7 @@ private List allBeforeDurationDescending(long duration) { } private static List allSnapshots(String[] snapshotNames, SnapshotSortKey sortBy, SortOrder order, Object fromSortValue) { - return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(snapshotNames) .setSort(sortBy) .setFromSortValue(fromSortValue.toString()) @@ -636,7 +652,7 @@ private static List allSnapshots(String[] snapshotNames, SnapshotS } private static List getAllSnapshotsForPolicies(String... 
policies) { - return clusterAdmin().prepareGetSnapshots(matchAllPattern()) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, matchAllPattern()) .setSnapshots(matchAllPattern()) .setPolicies(policies) .setSort(SnapshotSortKey.NAME) @@ -722,7 +738,7 @@ private static GetSnapshotsResponse sortedWithLimit(String[] repoNames, Snapshot } private static GetSnapshotsRequestBuilder baseGetSnapshotsRequest(String[] repoNames) { - return clusterAdmin().prepareGetSnapshots(repoNames) + return clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoNames) .setSnapshots("*", "-" + AbstractSnapshotIntegTestCase.OLD_VERSION_SNAPSHOT_PREFIX + "*"); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index 0aa3475de7be1..8ab1ddd85fc55 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -75,7 +75,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 0); // Getting a snapshot does not load any metadata - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("repository") + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "repository") .addSnapshots("snap") .setVerbose(randomBoolean()) .get(); @@ -85,7 +85,9 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 0); // Getting the status of a snapshot loads indices metadata but not global metadata - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus("repository").setSnapshots("snap").get(); + SnapshotsStatusResponse snapshotStatusResponse = 
clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "repository") + .setSnapshots("snap") + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertGlobalMetadataLoads("snap", 0); assertIndexMetadataLoads("snap", "docs", 1); @@ -94,7 +96,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs", "others")); // Restoring a snapshot loads indices metadata but not the global state - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repository", "snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -105,7 +107,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs")); // Restoring a snapshot with selective indices loads only required index metadata - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repository", "snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setIndices("docs") .setWaitForCompletion(true) .get(); @@ -117,7 +119,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertAcked(indicesAdmin().prepareDelete("docs", "others")); // Restoring a snapshot including the global state loads it with the index metadata - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("repository", "snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap") .setIndices("docs", "oth*") .setRestoreGlobalState(true) .setWaitForCompletion(true) @@ -128,7 +130,7 @@ public void testWhenMetadataAreLoaded() throws Exception { assertIndexMetadataLoads("snap", "others", 3); // Deleting a snapshot does not load the global metadata state but loads 
each index metadata - assertAcked(clusterAdmin().prepareDeleteSnapshot("repository", "snap").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snap").get()); assertGlobalMetadataLoads("snap", 1); assertIndexMetadataLoads("snap", "docs", 4); assertIndexMetadataLoads("snap", "others", 3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index 8fc6e9e2aa3d8..fc727007724de 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -118,16 +118,16 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { secondCluster.client() .admin() .cluster() - .preparePutRepository(repoNameOnSecondCluster) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster) .setType("fs") .setSettings(Settings.builder().put("location", repoPath)) .get(); - secondCluster.client().admin().cluster().prepareDeleteSnapshot(repoNameOnSecondCluster, "snap-1").get(); - secondCluster.client().admin().cluster().prepareDeleteSnapshot(repoNameOnSecondCluster, "snap-2").get(); + secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-1").get(); + secondCluster.client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnSecondCluster, "snap-2").get(); final SnapshotException sne = expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) ); assertThat(sne.getMessage(), containsString("failed to update snapshot in 
repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); @@ -142,7 +142,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { + "] at generation [4]." ) ); - assertAcked(clusterAdmin().prepareDeleteRepository(repoNameOnFirstCluster).get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoNameOnFirstCluster).get()); createRepository(repoNameOnFirstCluster, "fs", repoPath); createFullSnapshot(repoNameOnFirstCluster, "snap-5"); } @@ -155,7 +155,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoName, "snap-1"); - final String repoUuid = clusterAdmin().prepareGetRepositories(repoName) + final String repoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -170,7 +170,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx secondCluster.client() .admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings(Settings.builder().put("location", repoPath).put(READONLY_SETTING_KEY, true)) ); @@ -178,7 +178,7 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx secondCluster.client() .admin() .cluster() - .prepareGetRepositories(repoName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -189,12 +189,12 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx equalTo(repoUuid) ); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); 
IOUtils.rm(internalCluster().getCurrentMasterNodeInstance(Environment.class).resolveRepoFile(repoPath.toString())); createRepository(repoName, "fs", repoPath); createFullSnapshot(repoName, "snap-1"); - final String newRepoUuid = clusterAdmin().prepareGetRepositories(repoName) + final String newRepoUuid = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() @@ -204,12 +204,13 @@ public void testConcurrentWipeAndRecreateFromOtherCluster() throws InterruptedEx .uuid(); assertThat(newRepoUuid, not(equalTo((repoUuid)))); - secondCluster.client().admin().cluster().prepareGetSnapshots(repoName).get(); // force another read of the repo data + secondCluster.client().admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get(); // force another read of the + // repo data assertThat( secondCluster.client() .admin() .cluster() - .prepareGetRepositories(repoName) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, repoName) .get() .repositories() .stream() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 80ded243d3fb2..a96d127429b75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -65,7 +65,10 @@ public void testRepositoryCreation() throws Exception { logger.info("--> verify the repository"); int numberOfFiles = FileSystemUtils.files(location).length; - VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get(); + VerifyRepositoryResponse verifyRepositoryResponse = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") + .get(); assertThat(verifyRepositoryResponse.getNodes().size(), 
equalTo(cluster().numDataAndMasterNodes())); logger.info("--> verify that we didn't leave any files as a result of verification"); @@ -96,7 +99,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> check that both repositories can be retrieved by getRepositories query"); GetRepositoriesResponse repositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(randomFrom("_all", "*", "test-repo-*")) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, randomFrom("_all", "*", "test-repo-*")) .get(); assertThat(repositoriesResponse.repositories().size(), equalTo(2)); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue()); @@ -107,7 +110,7 @@ public void testRepositoryCreation() throws Exception { assertThat( client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("fs") .setSettings(Settings.builder().put("location", location)) .get() @@ -117,14 +120,14 @@ public void testRepositoryCreation() throws Exception { assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); logger.info("--> delete repository test-repo-1"); - client.admin().cluster().prepareDeleteRepository("test-repo-1").get(); - repositoriesResponse = client.admin().cluster().prepareGetRepositories().get(); + client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get(); assertThat(repositoriesResponse.repositories().size(), equalTo(1)); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue()); logger.info("--> delete repository test-repo-2"); - client.admin().cluster().prepareDeleteRepository("test-repo-2").get(); - repositoriesResponse = client.admin().cluster().prepareGetRepositories().get(); + 
client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2").get(); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get(); assertThat(repositoriesResponse.repositories().size(), equalTo(0)); } @@ -142,7 +145,7 @@ public void testMisconfiguredRepository() { logger.info("--> trying creating repository with incorrect settings"); try { - client.admin().cluster().preparePutRepository("test-repo").setType("fs").get(); + client.admin().cluster().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo").setType("fs").get(); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat(ex.getCause().getMessage(), equalTo("[test-repo] missing location")); @@ -152,7 +155,10 @@ public void testMisconfiguredRepository() { Path invalidRepoPath = createTempDir().toAbsolutePath(); String location = invalidRepoPath.toString(); try { - clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", location)).get(); + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setType("fs") + .setSettings(Settings.builder().put("location", location)) + .get(); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat( @@ -164,7 +170,11 @@ public void testMisconfiguredRepository() { public void testRepositoryAckTimeout() { logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack"); - AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo-1") + AcknowledgedResponse putRepositoryResponse = clusterAdmin().preparePutRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo-1" + ) .setType("fs") .setSettings( Settings.builder() @@ -177,7 +187,7 @@ public void testRepositoryAckTimeout() { assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false)); logger.info("--> creating repository 
test-repo-2 with standard timeout - should ack"); - putRepositoryResponse = clusterAdmin().preparePutRepository("test-repo-2") + putRepositoryResponse = clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setType("fs") .setSettings( Settings.builder() @@ -189,13 +199,15 @@ public void testRepositoryAckTimeout() { assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack"); - AcknowledgedResponse deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository("test-repo-2") - .setTimeout(TimeValue.ZERO) - .get(); + AcknowledgedResponse deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo-2" + ).setTimeout(TimeValue.ZERO).get(); assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false)); logger.info("--> deleting repository test-repo-1 with standard timeout - should ack"); - deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository("test-repo-1").get(); + deleteRepositoryResponse = clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1").get(); assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true)); } @@ -209,7 +221,7 @@ public void testRepositoryVerification() { logger.info("--> creating repository that cannot write any files - should fail"); ActionRequestBuilder builder3 = client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("mock") .setSettings(settings); expectThrows(RepositoryVerificationException.class, builder3); @@ -217,25 +229,41 @@ public void testRepositoryVerification() { logger.info("--> creating read-only repository that cannot read any files - should fail"); ActionRequestBuilder builder2 = client.admin() .cluster() - .preparePutRepository("test-repo-2") + 
.preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setType("mock") .setSettings(readonlySettings); expectThrows(RepositoryVerificationException.class, builder2); logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); + assertAcked( + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1") + .setType("mock") + .setSettings(settings) + .setVerify(false) + ); logger.info("--> verifying repository"); - ActionRequestBuilder builder1 = client.admin().cluster().prepareVerifyRepository("test-repo-1"); + ActionRequestBuilder builder1 = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-1"); expectThrows(RepositoryVerificationException.class, builder1); logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked"); assertAcked( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings).setVerify(false) + client.admin() + .cluster() + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") + .setType("mock") + .setSettings(readonlySettings) + .setVerify(false) ); logger.info("--> verifying repository"); - ActionRequestBuilder builder = client.admin().cluster().prepareVerifyRepository("test-repo-2"); + ActionRequestBuilder builder = client.admin() + .cluster() + .prepareVerifyRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2"); expectThrows(RepositoryVerificationException.class, builder); Path location = randomRepoPath(); @@ -244,7 +272,7 @@ public void testRepositoryVerification() { try { client.admin() .cluster() - .preparePutRepository("test-repo-1") + .preparePutRepository(TEST_REQUEST_TIMEOUT, 
TEST_REQUEST_TIMEOUT, "test-repo-1") .setType("mock") .setSettings(Settings.builder().put("location", location).put("localize_location", true)) .get(); @@ -258,7 +286,7 @@ public void testRepositoryConflict() throws Exception { logger.info("--> creating repository"); final String repo = "test-repo"; assertAcked( - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) .setType("mock") .setSettings( Settings.builder() @@ -276,11 +304,11 @@ public void testRepositoryConflict() throws Exception { } refresh(); final String snapshot1 = "test-snap1"; - clusterAdmin().prepareCreateSnapshot(repo, snapshot1).setWaitForCompletion(true).get(); + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot1).setWaitForCompletion(true).get(); String blockedNode = internalCluster().getMasterName(); blockMasterOnWriteIndexFile(repo); logger.info("--> start deletion of snapshot"); - ActionFuture future = clusterAdmin().prepareDeleteSnapshot(repo, snapshot1).execute(); + ActionFuture future = clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot1).execute(); logger.info("--> waiting for block to kick in on node [{}]", blockedNode); waitForBlock(blockedNode, repo); @@ -295,14 +323,17 @@ public void testRepositoryConflict() throws Exception { ); logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); - RepositoryConflictException e1 = expectThrows(RepositoryConflictException.class, clusterAdmin().prepareDeleteRepository(repo)); + RepositoryConflictException e1 = expectThrows( + RepositoryConflictException.class, + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) + ); assertThat(e1.status(), equalTo(RestStatus.CONFLICT)); assertThat(e1.getMessage(), containsString("trying to modify or unregister repository that is currently used")); logger.info("--> try updating the repository, 
should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e2 = expectThrows( RepositoryConflictException.class, - clusterAdmin().preparePutRepository(repo) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repo) // if "true" will deadlock on snapshot thread pool, we are running with single thread which is busy at the moment .setVerify(false) .setType("mock") @@ -357,14 +388,14 @@ public void testLeakedStaleIndicesAreDeletedBySubsequentDelete() throws Exceptio repository.setFailOnDeleteContainer(true); logger.info("--> delete the second snapshot"); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot2Name).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot2Name).get(); // Make repository work normally repository.setFailOnDeleteContainer(false); // This snapshot should delete last snapshot's residual stale indices as well logger.info("--> delete snapshot one"); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshot1Name).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot1Name).get(); logger.info("--> check no leftover files"); assertFileCount(repositoryPath, 2); // just the index-N and index.latest blobs @@ -443,7 +474,7 @@ public void run() { clusterService.addListener(clusterStateListener); final var deleteFuture = new PlainActionFuture(); - client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).execute(deleteFuture); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName).execute(deleteFuture); safeAwait(barrier); // wait for all the snapshot threads to be blocked clusterService.removeListener(clusterStateListener); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index d8bc9327a2edd..7664bbd6c91ee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -57,11 +57,11 @@ public void testRepositoryThrottlingStats() throws Exception { createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); logger.info("--> restore from snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setRenamePattern("test-") - .setRenameReplacement("test2-") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setRenamePattern("test-").setRenameReplacement("test2-").setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 100); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index 7aa1603735afe..7626e59cd1b9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -91,14 +91,14 @@ public void testParallelRestoreOperations() { RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName1) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName1) .setWaitForCompletion(false) .setRenamePattern(indexName1) .setRenameReplacement(restoredIndexName1) .get(); RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() .cluster() - 
.prepareRestoreSnapshot(repoName, snapshotName2) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName2) .setWaitForCompletion(false) .setRenamePattern(indexName2) .setRenameReplacement(restoredIndexName2) @@ -135,7 +135,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { ActionFuture restoreSnapshotResponse1 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(indexName1) .setRenamePattern(indexName1) .setRenameReplacement(restoredIndexName1) @@ -145,7 +145,7 @@ public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { ActionFuture restoreSnapshotResponse2 = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(sameSourceIndex ? indexName1 : indexName2) .setRenamePattern(sameSourceIndex ? indexName1 : indexName2) .setRenameReplacement(restoredIndexName2) @@ -198,7 +198,7 @@ public void testRestoreLogging() throws IllegalAccessException { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(repoName, snapshotName) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(false) .setRenamePattern(indexName) .setRenameReplacement(restoredIndexName) @@ -243,9 +243,11 @@ public void testRestoreIncreasesPrimaryTerms() { assertAcked(indicesAdmin().prepareClose(indexName)); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), 
equalTo(numPrimaries)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -297,9 +299,11 @@ public void testRestoreWithDifferentMappingsAndSettings() throws Exception { indicesAdmin().prepareClose("test-idx").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); logger.info("--> assert that old mapping is restored"); @@ -338,10 +342,11 @@ public void testRestoreAliases() throws Exception { assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot with aliases"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); // We don't restore any indices here assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -363,7 +368,7 @@ public void testRestoreAliases() throws Exception { assertTrue(indicesAdmin().prepareGetAliases("alias-1").get().getAliases().isEmpty()); logger.info("--> restore snapshot without aliases"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) 
.setIncludeAliases(false) @@ -415,10 +420,11 @@ public void testRestoreTemplates() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); // We don't restore any indices here assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); @@ -452,7 +458,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> restore indices with different names"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -468,7 +474,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> and try to restore these indices again"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -484,7 +490,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> restore indices with different names"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+-2)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -498,7 +504,7 @@ public void testRenameOnRestore() 
throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("same-name") .setWaitForCompletion(true) @@ -512,7 +518,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("test-idx-2") .setRenameReplacement("test-idx-1") .setWaitForCompletion(true) @@ -526,7 +532,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern(".+") .setRenameReplacement("__WRONG__") @@ -541,7 +547,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern(".+") .setRenameReplacement("alias-3") @@ -556,7 +562,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern("test-idx") .setRenameReplacement("alias") @@ -571,7 +577,7 @@ public void testRenameOnRestore() throws Exception { try { client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1", "test-idx-2") .setRenamePattern("test-idx-1") .setRenameReplacement("alias-2") @@ -585,7 +591,7 @@ public void testRenameOnRestore() throws Exception { logger.info("--> try renaming indices into 
existing alias of itself, but don't restore aliases "); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-1") .setRenamePattern("test-idx") .setRenameReplacement("alias") @@ -614,7 +620,7 @@ public void testDynamicRestoreThrottling() throws Exception { updateClusterSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "100b")); ActionFuture restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute(); @@ -686,7 +692,7 @@ public void testChangeSettingsOnRestore() throws Exception { logger.info("--> try restoring while changing the number of shards - should fail"); ActionRequestBuilder builder1 = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIncorrectIndexSettings) .setWaitForCompletion(true); @@ -699,7 +705,7 @@ public void testChangeSettingsOnRestore() throws Exception { .build(); ActionRequestBuilder builder = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIncorrectReplicasIndexSettings) .setWaitForCompletion(true); @@ -708,7 +714,7 @@ public void testChangeSettingsOnRestore() throws Exception { logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") 
.setIgnoreIndexSettings("index.analysis.*") .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) @@ -731,7 +737,7 @@ public void testChangeSettingsOnRestore() throws Exception { logger.info("--> restore index with correct settings from the snapshot"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIgnoreIndexSettings("*") // delete everything we can delete .setIndexSettings(newIndexSettings) .setWaitForCompletion(true) @@ -806,7 +812,7 @@ public void testRecreateBlocksOnRestore() throws Exception { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndexSettings(changedSettings) .setWaitForCompletion(true) .get(); @@ -858,7 +864,7 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { createSnapshot("test-repo", "snapshot-0", Collections.singletonList("test-index")); final SnapshotRestoreException restoreError = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "snapshot-0") .setIndexSettings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false)) .setRenamePattern("test-index") .setRenameReplacement("new-index") @@ -874,7 +880,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, oldSnapshot) ); assertThat( snapshotRestoreException.getMessage(), @@ -903,11 +909,11 @@ public 
void testNoWarningsOnRestoreOverClosedIndex() throws IllegalAccessExcepti new MockLog.UnseenEventExpectation("no warnings", FileRestoreContext.class.getCanonicalName(), Level.WARN, "*") ); - final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setIndices(indexName) - .setRestoreGlobalState(false) - .setWaitForCompletion(true) - .get(); + final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setIndices(indexName).setRestoreGlobalState(false).setWaitForCompletion(true).get(); assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards()); mockLog.assertAllExpectationsMatched(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index d625b53785d38..a651537c77539 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -157,7 +157,7 @@ public void testBasicWorkFlow() throws Exception { createSnapshot("test-repo", "test-snap", Arrays.asList(indicesToSnapshot)); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo") + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -189,9 +189,11 @@ public void testBasicWorkFlow() throws Exception { indicesAdmin().prepareClose("test-idx-1", "test-idx-2").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + 
RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -216,7 +218,7 @@ public void testBasicWorkFlow() throws Exception { logger.info("--> delete indices"); cluster().wipeIndices("test-idx-1", "test-idx-2"); logger.info("--> restore one index after deletion"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx-*", "-test-idx-2") .get(); @@ -263,11 +265,11 @@ public void testSingleGetAfterRestore() throws Exception { createRepository(repoName, "fs", absolutePath); createSnapshot(repoName, snapshotName, Collections.singletonList(indexName)); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .setRenamePattern(indexName) - .setRenameReplacement(restoredIndexName) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).setRenamePattern(indexName).setRenameReplacement(restoredIndexName).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(client().prepareGet(restoredIndexName, docId).get().isExists(), equalTo(true)); @@ -295,9 +297,11 @@ public void testFreshIndexUUID() { indicesAdmin().prepareClose("test").get(); logger.info("--> restore all indices from the snapshot"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = 
clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); ensureGreen(); @@ -310,7 +314,7 @@ public void testFreshIndexUUID() { ); logger.info("--> restore indices with different names"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setRenamePattern("(.+)") .setRenameReplacement("$1-copy") .setWaitForCompletion(true) @@ -342,7 +346,7 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setSettings( Settings.builder() @@ -360,10 +364,11 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException logger.info("--> snapshot"); try { - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .setIndices("test-idx") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).setIndices("test-idx").get(); if (createSnapshotResponse.getSnapshotInfo().totalShards() == createSnapshotResponse.getSnapshotInfo().successfulShards()) { // If we are here, that means we didn't have any failures, let's check it assertThat(getFailureCount("test-repo"), equalTo(0L)); @@ -408,7 +413,7 @@ public void testDataFileFailureDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = 
clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -430,7 +435,7 @@ public void testDataFileFailureDuringSnapshot() throws Exception { assertThat(snapshotInfo.totalShards(), greaterThan(snapshotInfo.successfulShards())); // Verify that snapshot status also contains the same failures - SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap") .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); @@ -486,7 +491,7 @@ public void testDataFileFailureDuringRestore() throws Exception { logger.info("--> restore index after deletion"); final RestoreSnapshotResponse restoreResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); @@ -542,7 +547,7 @@ public void testDataFileCorruptionDuringRestore() throws Exception { logger.info("--> restore corrupt index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -640,13 +645,13 @@ private void unrestorableUseCase( // update the test repository assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setSettings(Settings.builder().put("location", 
repositoryLocation).put(repositorySettings).build()) ); // attempt to restore the snapshot with the given settings - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices(indexName) .setIndexSettings(restoreIndexSettings) .setWaitForCompletion(true) @@ -685,7 +690,9 @@ private void unrestorableUseCase( // delete the index and restore again assertAcked(indicesAdmin().prepareDelete(indexName)); - restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).get(); + restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); assertThat(restoreResponse.getRestoreInfo().totalShards(), equalTo(numShards.numPrimaries)); assertThat(restoreResponse.getRestoreInfo().successfulShards(), equalTo(numShards.numPrimaries)); @@ -721,7 +728,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except logger.info("--> restore index after deletion"); ActionFuture restoreSnapshotResponseFuture = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute(); @@ -746,7 +753,7 @@ public void testDeletionOfFailingToRecoverIndexShouldStopRestore() throws Except logger.info("--> trying to restore index again"); restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -770,7 +777,9 @@ public void testUnallocatedShards() { logger.info("--> snapshot"); final SnapshotException sne = 
expectThrows( SnapshotException.class, - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx") ); assertThat(sne.getMessage(), containsString("the following indices have unassigned primary shards")); assertThat(getRepositoryData("test-repo"), is(RepositoryData.EMPTY)); @@ -811,12 +820,13 @@ public void testDeleteSnapshot() throws Exception { if (randomBoolean()) { for (int i = 1; i < numberOfSnapshots - 1; i++) { - client.admin().cluster().prepareDeleteSnapshot("test-repo", new String[] { "test-snap-" + i }).get(); + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", new String[] { "test-snap-" + i }).get(); } } else { client.admin() .cluster() .prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, "test-repo", IntStream.range(1, numberOfSnapshots - 1).mapToObj(i -> "test-snap-" + i).toArray(String[]::new) ) @@ -834,7 +844,7 @@ public void testDeleteSnapshot() throws Exception { String lastSnapshot = "test-snap-" + (numberOfSnapshots - 1); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", lastSnapshot) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", lastSnapshot) .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -880,7 +890,12 @@ public void testMoveShardWhileSnapshotting() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); 
logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -905,7 +920,7 @@ public void testMoveShardWhileSnapshotting() throws Exception { logger.info("--> restore index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -937,7 +952,12 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-idx"); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); logger.info("--> waiting for block to kick in"); waitForBlock(blockedNode, "test-repo"); @@ -945,7 +965,10 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode); try { - client.admin().cluster().prepareDeleteRepository(randomFrom("test-repo", "test-*", "*")).get(); + client.admin() + .cluster() + .prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, randomFrom("test-repo", "test-*", "*")) + .get(); fail("shouldn't be able to delete in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository deletion failed"); @@ -959,7 +982,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { try { client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setVerify(false) .setType("fs") 
.setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) @@ -973,7 +996,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo-2") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo-2") .setVerify(false) // do not do verification itself as snapshot threads could be fully blocked .setType("fs") .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) @@ -996,7 +1019,7 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { logger.info("--> restore index"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -1029,7 +1052,7 @@ public void testReadonlyRepository() throws Exception { logger.info("--> restore index after deletion"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("readonly-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -1037,12 +1060,15 @@ public void testReadonlyRepository() throws Exception { assertDocCount("test-idx", 100L); logger.info("--> list available shapshots"); - GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("readonly-repo").get(); + GetSnapshotsResponse getSnapshotsResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "readonly-repo") + .get(); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(1)); logger.info("--> try deleting snapshot"); assertRequestBuilderThrows( - client.admin().cluster().prepareDeleteSnapshot("readonly-repo", 
"test-snap"), + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap"), RepositoryException.class, "repository is readonly" ); @@ -1051,7 +1077,7 @@ public void testReadonlyRepository() throws Exception { assertRequestBuilderThrows( client.admin() .cluster() - .prepareCreateSnapshot("readonly-repo", "test-snap-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "readonly-repo", "test-snap-2") .setWaitForCompletion(true) .setIndices("test-idx"), RepositoryException.class, @@ -1086,7 +1112,7 @@ public void testSnapshotStatus() throws Exception { logger.info("--> snapshot"); client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIncludeGlobalState(false) .setIndices("test-idx") @@ -1112,7 +1138,7 @@ public void testSnapshotStatus() throws Exception { }); logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode); - SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").get(); + SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").get(); assertThat(response.getSnapshots().size(), equalTo(1)); SnapshotStatus snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1127,7 +1153,7 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking snapshot status for all currently running and snapshot with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getSnapshots().size(), equalTo(1)); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getState(), equalTo(State.STARTED)); @@ -1142,11 
+1168,15 @@ public void testSnapshotStatus() throws Exception { } logger.info("--> checking that _current returns the currently running snapshot"); - GetSnapshotsResponse getResponse = client.admin().cluster().prepareGetSnapshots("test-repo").setCurrentSnapshot().get(); + GetSnapshotsResponse getResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setCurrentSnapshot() + .get(); assertThat(getResponse.getSnapshots().size(), equalTo(1)); SnapshotInfo snapshotInfo = getResponse.getSnapshots().get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS)); - snapshotStatus = client.admin().cluster().prepareSnapshotStatus().get().getSnapshots().get(0); + snapshotStatus = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get().getSnapshots().get(0); assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getTotalShards())); assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get("test-idx").getShardsStats().getDoneShards())); assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); @@ -1159,7 +1189,7 @@ public void testSnapshotStatus() throws Exception { logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size()); logger.info("--> checking snapshot status again after snapshot is done"); - response = client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap").get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap").get(); snapshotStatus = response.getSnapshots().get(0); assertThat(snapshotStatus.getIndices().size(), equalTo(1)); assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); @@ -1172,25 +1202,31 @@ public void testSnapshotStatus() throws Exception { assertThat(indexStatus.getShards().size(), equalTo(snapshotInfo.totalShards())); logger.info("--> checking snapshot status after 
it is done with empty repository"); - response = client.admin().cluster().prepareSnapshotStatus().get(); + response = client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get(); assertThat(response.getSnapshots().size(), equalTo(0)); logger.info("--> checking that _current no longer returns the snapshot"); assertThat( - client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("_current").get().getSnapshots().isEmpty(), + client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("_current") + .get() + .getSnapshots() + .isEmpty(), equalTo(true) ); // test that getting an unavailable snapshot status throws an exception if ignoreUnavailable is false on the request SnapshotMissingException ex = expectThrows( SnapshotMissingException.class, - client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist") + client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").addSnapshots("test-snap-doesnt-exist") ); assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage()); // test that getting an unavailable snapshot status does not throw an exception if ignoreUnavailable is true on the request response = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap-doesnt-exist") .setIgnoreUnavailable(true) .get(); @@ -1199,7 +1235,7 @@ public void testSnapshotStatus() throws Exception { // (available one should be returned) response = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap", "test-snap-doesnt-exist") .setIgnoreUnavailable(true) .get(); @@ -1229,7 +1265,12 @@ public void testSnapshotRelocatingPrimary() throws Exception { ); logger.info("--> snapshot"); - client.admin().cluster().prepareCreateSnapshot("test-repo", 
"test-snap").setWaitForCompletion(false).setIndices("test-idx").get(); + client.admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(false) + .setIndices("test-idx") + .get(); awaitNoMoreRunningOperations(); SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap"); @@ -1256,7 +1297,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test") .get() .getSnapshots() @@ -1272,7 +1313,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-1") .get() .getSnapshots() @@ -1289,7 +1330,7 @@ public void testSnapshotMoreThanOnce() throws InterruptedException { { SnapshotStatus snapshotStatus = client.admin() .cluster() - .prepareSnapshotStatus("test-repo") + .prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-2") .get() .getSnapshots() @@ -1320,7 +1361,7 @@ public void testCloseOrDeleteIndexDuringSnapshot() throws Exception { createIndexWithRandomDocs("test-idx-3", 100); logger.info("--> snapshot"); - ActionFuture future = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") + ActionFuture future = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices("test-idx-*") .setWaitForCompletion(true) .setPartial(false) @@ -1377,7 +1418,11 @@ public void testCloseIndexDuringRestore() throws Exception { final ActionFuture restoreFut; try { logger.info("--> start restore"); - restoreFut = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); + restoreFut = client.admin() + .cluster() + 
.prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setWaitForCompletion(true) + .execute(); logger.info("--> waiting for block to kick in"); waitForBlockOnAnyDataNode("test-repo"); @@ -1419,7 +1464,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { assertThat( client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(indexName) .setWaitForCompletion(true) .get() @@ -1431,7 +1476,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { assertThat( client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName2) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName2) .setIndices(indexName) .setWaitForCompletion(true) .get() @@ -1449,7 +1494,11 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { final ActionFuture restoreFut; try { logger.info("--> start restore"); - restoreFut = client.admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true).execute(); + restoreFut = client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .execute(); logger.info("--> waiting for block to kick in"); waitForBlockOnAnyDataNode(repoName); @@ -1457,7 +1506,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { logger.info("--> try deleting the snapshot while the restore is in progress (should throw an error)"); ConcurrentSnapshotExecutionException e = expectThrows( ConcurrentSnapshotExecutionException.class, - clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) ); assertEquals(repoName, e.getRepositoryName()); assertEquals(snapshotName, e.getSnapshotName()); @@ -1487,10 +1536,22 @@ public void testSnapshotName() throws Exception { 
createRepository("test-repo", "fs"); - expectThrows(InvalidSnapshotNameException.class, client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo")); - expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo")); + expectThrows( + InvalidSnapshotNameException.class, + client.admin().cluster().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "_foo") + ); + expectThrows( + SnapshotMissingException.class, + client.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("_foo") + ); } public void testListCorruptedSnapshot() throws Exception { @@ -1525,7 +1586,7 @@ public void testListCorruptedSnapshot() throws Exception { logger.info("--> get snapshots request should return both snapshots"); List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setIgnoreUnavailable(true) .get() .getSnapshots(); @@ -1536,7 +1597,7 @@ public void testListCorruptedSnapshot() throws Exception { final SnapshotException ex = expectThrows( SnapshotException.class, - client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false) + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setIgnoreUnavailable(false) ); assertThat(ex.getRepositoryName(), equalTo("test-repo")); assertThat(ex.getSnapshotName(), 
equalTo("test-snap-2")); @@ -1565,12 +1626,14 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo(snapshotName)); - SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get(); + SnapshotsStatusResponse snapshotStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshotName) + .get(); assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo(snapshotName)); @@ -1578,15 +1641,19 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { SnapshotException ex = expectThrows( SnapshotException.class, - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setRestoreGlobalState(true) + .setWaitForCompletion(true) ); assertThat(ex.getRepositoryName(), equalTo(repoName)); assertThat(ex.getSnapshotName(), equalTo(snapshotName)); assertThat(ex.getMessage(), containsString("failed to read global metadata")); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName) - .setWaitForCompletion(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotName + ).setWaitForCompletion(true).get(); 
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(snapshotInfo.successfulShards())); @@ -1642,7 +1709,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { outChan.truncate(randomInt(10)); } - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); @@ -1651,7 +1718,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; - clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setIndices(nbDocsPerIndex.keySet().stream().filter(isRestorableIndex).toArray(String[]::new)) .setRestoreGlobalState(randomBoolean()) .setWaitForCompletion(true) @@ -1688,7 +1755,7 @@ public void testCannotCreateSnapshotsWithSameName() throws Exception { try { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -1778,7 +1845,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("mock") .setVerify(false) .setSettings( @@ -1806,7 +1873,7 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws 
Exception { try { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices("test-idx") .get(); @@ -1834,7 +1901,7 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository("test-repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setVerify(false) .setSettings(Settings.builder().put("location", repoPath)) @@ -1875,18 +1942,27 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { } logger.info("--> verify _all returns snapshot info"); - GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("_all").setVerbose(false).get(); + GetSnapshotsResponse response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("_all") + .setVerbose(false) + .get(); assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify wildcard returns snapshot info"); - response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-*").setVerbose(false).get(); + response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap-*") + .setVerbose(false) + .get(); assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); logger.info("--> verify individual requests return snapshot info"); for (int i = 0; i < numSnapshots; i++) { - response = clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap-" + i).setVerbose(false).get(); + response = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap-" + i) + .setVerbose(false) + .get(); 
assertEquals(1, response.getSnapshots().size()); verifySnapshotInfo(response, indicesPerSnapshot); } @@ -1927,7 +2003,7 @@ public void testSnapshottingWithMissingSequenceNumbers() throws Exception { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .execute() .get(); @@ -1986,7 +2062,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException assertThat(snapshot2.successfulShards(), is(newShardCount)); logger.info("--> restoring snapshot 1"); - clusterAdmin().prepareRestoreSnapshot(repoName, "snap-1") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-1") .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-1") @@ -1994,7 +2070,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException .get(); logger.info("--> restoring snapshot 2"); - clusterAdmin().prepareRestoreSnapshot(repoName, "snap-2") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap-2") .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-2") @@ -2019,7 +2095,7 @@ public void testSnapshotDifferentIndicesBySameName() throws InterruptedException } assertAcked(startDeleteSnapshot(repoName, snapshotToDelete).get()); logger.info("--> restoring snapshot [{}]", snapshotToRestore); - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotToRestore) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotToRestore) .setIndices(indexName) .setRenamePattern(indexName) .setRenameReplacement("restored-3") @@ -2048,8 +2124,8 @@ public void testBulkDeleteWithOverlappingPatterns() { } logger.info("--> deleting all snapshots"); - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snap-*", 
"*").get(); - final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots("test-repo").get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-*", "*").get(); + final GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get(); assertThat(getSnapshotsResponse.getSnapshots(), empty()); } @@ -2081,7 +2157,7 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots(repoName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) .setSnapshots(randomFrom(snapName, "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -2095,11 +2171,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices get restored with a wildcard restore { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("*").execute().get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2115,11 +2191,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that exclusions work on hidden indices { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("*", "-.*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("*", "-.*").execute().get(); 
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2135,11 +2211,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices can be restored with a non-star pattern { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices("hid*") - .execute() - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices("hid*").execute().get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), @@ -2155,10 +2231,11 @@ public void testHiddenIndicesIncludedInSnapshot() throws Exception { // Verify that hidden indices can be restored by fully specified name { - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, snapName) - .setWaitForCompletion(true) - .setIndices(dottedHiddenIndex) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapName + ).setWaitForCompletion(true).setIndices(dottedHiddenIndex).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat( restoreSnapshotResponse.getRestoreInfo().successfulShards(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index c31eafa8444ad..6c91db0ad7228 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -70,7 +70,11 @@ public void testExceptionWhenRestoringPersistentSettings() { logger.info("--> restore snapshot"); final IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true) + client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) ); assertEquals(BrokenSettingPlugin.EXCEPTION.getMessage(), ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java index 05888fd776641..b0c5e73de5859 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotCustomPluginStateIT.java @@ -107,15 +107,15 @@ public void testIncludeGlobalState() throws Exception { } logger.info("--> snapshot without global state"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state") - .setIndices() - .setIncludeGlobalState(false) - .setWaitForCompletion(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state" + ).setIndices().setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(getSnapshot("test-repo", "test-snap-no-global-state").state(), equalTo(SnapshotState.SUCCESS)); - 
SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo") + SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .addSnapshots("test-snap-no-global-state") .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); @@ -123,7 +123,7 @@ public void testIncludeGlobalState() throws Exception { assertThat(snapshotStatus.includeGlobalState(), equalTo(false)); logger.info("--> snapshot with global state"); - createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-with-global-state") + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-with-global-state") .setIndices() .setIncludeGlobalState(true) .setWaitForCompletion(true) @@ -131,7 +131,9 @@ public void testIncludeGlobalState() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(0)); assertThat(getSnapshot("test-repo", "test-snap-with-global-state").state(), equalTo(SnapshotState.SUCCESS)); - snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-with-global-state").get(); + snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .addSnapshots("test-snap-with-global-state") + .get(); assertThat(snapshotsStatusResponse.getSnapshots().size(), equalTo(1)); snapshotStatus = snapshotsStatusResponse.getSnapshots().get(0); assertThat(snapshotStatus.includeGlobalState(), equalTo(true)); @@ -154,10 +156,11 @@ public void testIncludeGlobalState() throws Exception { } logger.info("--> try restoring from snapshot without global state"); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state") - .setWaitForCompletion(true) - 
.setRestoreGlobalState(false) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state" + ).setWaitForCompletion(true).setRestoreGlobalState(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(0)); logger.info("--> check that template wasn't restored"); @@ -165,7 +168,7 @@ public void testIncludeGlobalState() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> restore cluster state"); - restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-with-global-state") + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-with-global-state") .setWaitForCompletion(true) .setRestoreGlobalState(true) .get(); @@ -192,11 +195,11 @@ public void testIncludeGlobalState() throws Exception { createIndexWithRandomDocs("test-idx", 100); logger.info("--> snapshot without global state but with indices"); - createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-no-global-state-with-index") - .setIndices("test-idx") - .setIncludeGlobalState(false) - .setWaitForCompletion(true) - .get(); + createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state-with-index" + ).setIndices("test-idx").setIncludeGlobalState(false).setWaitForCompletion(true).get(); assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -221,10 +224,11 @@ public void testIncludeGlobalState() throws Exception { assertIndexTemplateMissing(getIndexTemplatesResponse, "test-template"); logger.info("--> try restoring index and cluster state from snapshot without global state"); - restoreSnapshotResponse = 
clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-no-global-state-with-index") - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .get(); + restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap-no-global-state-with-index" + ).setWaitForCompletion(true).setRestoreGlobalState(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java index b2494c5bd2b91..773be25a4ca9d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShardsServiceIT.java @@ -48,7 +48,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { String blockedNode = blockNodeWithIndex("test-repo", "test-index"); dataNodeClient().admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(false) .setIndices("test-index") .get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index a5db88820f8d8..7f90b57204fc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -222,7 +222,7 @@ public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { // and succeeds final var snapshots = safeAwait( SubscribableListener.newForked( - l -> 
client().admin().cluster().getSnapshots(new GetSnapshotsRequest(repoName), l) + l -> client().admin().cluster().getSnapshots(new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, repoName), l) ) ).getSnapshots(); assertThat(snapshots, hasSize(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index c3da91bde254d..600a3953d9bda 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -85,13 +85,13 @@ public void testStatusApiConsistency() throws Exception { createFullSnapshot("test-repo", "test-snap"); - List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").get().getSnapshots(); + List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").get().getSnapshots(); assertThat(snapshotInfos.size(), equalTo(1)); SnapshotInfo snapshotInfo = snapshotInfos.get(0); assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo.version(), equalTo(IndexVersion.current())); - final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") + final List snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") .get() .getSnapshots(); @@ -121,7 +121,12 @@ public void testStatusAPICallInProgressSnapshot() throws Exception { awaitNumberOfSnapshotsInProgress(1); assertEquals( SnapshotsInProgress.State.STARTED, - clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).getState() + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots("test-snap") + .get() + .getSnapshots() + .get(0) + .getState() ); logger.info("--> unblock all data nodes"); @@ -140,7 +145,10 @@ public void 
testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -169,7 +177,10 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); - expectThrows(SnapshotMissingException.class, clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap")); + expectThrows( + SnapshotMissingException.class, + clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo").setSnapshots("test-snap") + ); } public void testGetSnapshotsWithoutIndices() throws Exception { @@ -177,12 +188,18 @@ public void testGetSnapshotsWithoutIndices() throws Exception { logger.info("--> snapshot"); final SnapshotInfo snapshotInfo = assertSuccessful( - clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setIndices().setWaitForCompletion(true).execute() + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") + .setIndices() + .setWaitForCompletion(true) + .execute() ); assertThat(snapshotInfo.totalShards(), is(0)); logger.info("--> verify that snapshot without index shows up in non-verbose listing"); - final List snapshotInfos = clusterAdmin().prepareGetSnapshots("test-repo").setVerbose(false).get().getSnapshots(); + final List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setVerbose(false) + .get() + .getSnapshots(); 
assertThat(snapshotInfos, hasSize(1)); final SnapshotInfo found = snapshotInfos.get(0); assertThat(found.snapshotId(), is(snapshotInfo.snapshotId())); @@ -221,7 +238,7 @@ public void testCorrectCountsForDoneShards() throws Exception { final ActionFuture responseSnapshotOne = internalCluster().masterClient() .admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotOne) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotOne) .setWaitForCompletion(true) .execute(); @@ -256,9 +273,11 @@ public void testCorrectCountsForDoneShards() throws Exception { blockDataNode(repoName, dataNodeTwo); final String snapshotTwo = "snap-2"; - final ActionFuture responseSnapshotTwo = clusterAdmin().prepareCreateSnapshot(repoName, snapshotTwo) - .setWaitForCompletion(true) - .execute(); + final ActionFuture responseSnapshotTwo = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + snapshotTwo + ).setWaitForCompletion(true).execute(); waitForBlock(dataNodeTwo, repoName); @@ -292,7 +311,7 @@ public void testCorrectCountsForDoneShards() throws Exception { public void testGetSnapshotsNoRepos() { ensureGreen(); - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(new String[] { "_all" }) + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, new String[] { "_all" }) .setSnapshots(randomFrom("_all", "*")) .get(); @@ -322,7 +341,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { assertAcked( client.admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType("fs") .setSettings(Settings.builder().put("location", repoPath).build()) ); @@ -340,7 +359,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> create snapshot with index {} and name {} in repository {}", snapshotIndex, snapshotName, repoName); CreateSnapshotResponse createSnapshotResponse = 
client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -358,7 +377,10 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> get and verify snapshots"); GetSnapshotsResponse getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(randomFrom(new String[] { "_all" }, new String[] { "repo*" }, repoList.toArray(new String[0]))) + .prepareGetSnapshots( + TEST_REQUEST_TIMEOUT, + randomFrom(new String[] { "_all" }, new String[] { "repo*" }, repoList.toArray(new String[0])) + ) .setSnapshots(randomFrom("_all", "*")) .get(); @@ -375,7 +397,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> specify all snapshot names with ignoreUnavailable=false"); GetSnapshotsResponse getSnapshotsResponse2 = client.admin() .cluster() - .prepareGetSnapshots(randomFrom("_all", "repo*")) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*")) .setIgnoreUnavailable(false) .setSnapshots(snapshotList.toArray(new String[0])) .get(); @@ -387,7 +409,7 @@ public void testGetSnapshotsMultipleRepos() throws Exception { logger.info("--> specify all snapshot names with ignoreUnavailable=true"); GetSnapshotsResponse getSnapshotsResponse3 = client.admin() .cluster() - .prepareGetSnapshots(randomFrom("_all", "repo*")) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, randomFrom("_all", "repo*")) .setIgnoreUnavailable(true) .setSnapshots(snapshotList.toArray(new String[0])) .get(); @@ -432,7 +454,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { return successShards == shards.size() - 1 && initShards == 1; }); - GetSnapshotsResponse response1 = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse response1 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots("test-snap") 
.setIgnoreUnavailable(true) .get(); @@ -441,13 +463,13 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { SnapshotInfo snapshotInfo = snapshotInfoList.get(0); assertEquals(SnapshotState.IN_PROGRESS, snapshotInfo.state()); - SnapshotStatus snapshotStatus = clusterAdmin().prepareSnapshotStatus().get().getSnapshots().get(0); + SnapshotStatus snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT).get().getSnapshots().get(0); assertThat(snapshotInfo.totalShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getTotalShards())); assertThat(snapshotInfo.successfulShards(), equalTo(snapshotStatus.getIndices().get(indexName).getShardsStats().getDoneShards())); assertThat(snapshotInfo.shardFailures().size(), equalTo(0)); String notExistedSnapshotName = "snapshot_not_exist"; - GetSnapshotsResponse response2 = clusterAdmin().prepareGetSnapshots("test-repo") + GetSnapshotsResponse response2 = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(notExistedSnapshotName) .setIgnoreUnavailable(true) .get(); @@ -455,7 +477,9 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false) + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") + .setSnapshots(notExistedSnapshotName) + .setIgnoreUnavailable(false) ); logger.info("--> unblock all data nodes"); @@ -475,7 +499,9 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { ensureGreen(); indexRandomDocs("test-idx-good", randomIntBetween(1, 5)); - final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshot).get(); + final SnapshotsStatusResponse snapshotsStatusResponse = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshot) + 
.get(); assertEquals(1, snapshotsStatusResponse.getSnapshots().size()); assertEquals(SnapshotsInProgress.State.FAILED, snapshotsStatusResponse.getSnapshots().get(0).getState()); } @@ -498,12 +524,12 @@ public void testGetSnapshotsRequest() throws Exception { logger.info("--> get snapshots on an empty repository"); expectThrows( SnapshotMissingException.class, - client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot") + client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName).addSnapshots("non-existent-snapshot") ); // with ignore unavailable set to true, should not throw an exception GetSnapshotsResponse getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setIgnoreUnavailable(true) .addSnapshots("non-existent-snapshot") .get(); @@ -521,14 +547,14 @@ public void testGetSnapshotsRequest() throws Exception { final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName); client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "snap-on-empty-repo") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, "snap-on-empty-repo") .setWaitForCompletion(false) .setIndices(indexName) .get(); waitForBlock(initialBlockedNode, repositoryName); // wait for block to kick in getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo")) .get(); assertEquals(1, getSnapshotsResponse.getSnapshots().size()); @@ -543,7 +569,7 @@ public void testGetSnapshotsRequest() throws Exception { final String snapshotName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, snapshotName) + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -563,7 +589,7 @@ public void testGetSnapshotsRequest() throws Exception { final String blockedNode = blockNodeWithIndex(repositoryName, indexName); client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, inProgressSnapshot) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, inProgressSnapshot) .setWaitForCompletion(false) .setIndices(indexName) .get(); @@ -583,7 +609,7 @@ public void testGetSnapshotsRequest() throws Exception { } getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY)) .get(); List sortedNames = Arrays.asList(snapshotNames); @@ -591,7 +617,11 @@ public void testGetSnapshotsRequest() throws Exception { assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().toList(), equalTo(sortedNames)); - getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get(); + getSnapshotsResponse = client.admin() + .cluster() + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) + .addSnapshots(snapshotNames) + .get(); sortedNames = Arrays.asList(snapshotNames); Collections.sort(sortedNames); assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots)); @@ -604,7 +634,7 @@ public void testGetSnapshotsRequest() throws Exception { final String secondRegex = "*" + regexName.substring(splitPos); getSnapshotsResponse = client.admin() .cluster() - .prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .addSnapshots(snapshotNames) .addSnapshots(firstRegex, secondRegex) .get(); @@ -644,7 +674,7 @@ public void 
testConcurrentCreateAndStatusAPICalls() throws Exception { createsListener.map(ignored -> null) ); for (final var snapshotName : snapshotNames) { - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(waitForCompletion) .execute(createsGroupedListener); } @@ -655,16 +685,24 @@ public void testConcurrentCreateAndStatusAPICalls() throws Exception { .info(ThreadPool.Names.SNAPSHOT_META) .getMax(); for (int i = 0; i < metaThreadPoolSize * 2; i++) { - statuses.add(dataNodeClient.admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotNames).execute()); - gets.add(dataNodeClient.admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotNames).execute()); + statuses.add( + dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); + gets.add( + dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); } // ... 
and then some more status requests until all snapshots are done var masterClusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); assertBusy(() -> { final var stillRunning = SnapshotsInProgress.get(masterClusterService.state()).isEmpty() == false; - statuses.add(dataNodeClient.admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotNames).execute()); - gets.add(dataNodeClient.admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotNames).execute()); + statuses.add( + dataNodeClient.admin().cluster().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); + gets.add( + dataNodeClient.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotNames).execute() + ); assertFalse(stillRunning); }, 60, TimeUnit.SECONDS); @@ -696,14 +734,16 @@ public void testInfiniteTimeout() throws Exception { indexRandomDocs("test-idx", 10); ensureGreen(); blockAllDataNodes("test-repo"); - final ActionFuture snapshotResponseFuture = clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap") - .setWaitForCompletion(true) - .execute(); + final ActionFuture snapshotResponseFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setWaitForCompletion(true).execute(); try { waitForBlockOnAnyDataNode("test-repo"); // Make sure that the create-snapshot task completes on master assertFalse(clusterAdmin().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); - final List snapshotStatus = clusterAdmin().prepareSnapshotStatus("test-repo") + final List snapshotStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, "test-repo") .setMasterNodeTimeout(TimeValue.MINUS_ONE) .get() .getSnapshots(); @@ -724,7 +764,11 @@ private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotS private static SnapshotStatus getSnapshotStatus(String repoName, String snapshotName) { try { - 
return clusterAdmin().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get().getSnapshots().get(0); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0); } catch (SnapshotMissingException e) { throw new AssertionError(e); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index b759993be26df..9bcddd5c58d66 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -579,6 +579,7 @@ && randomBoolean() closeIndicesStep.addListener(mustSucceed(ignored1 -> deleteIndicesStep.addListener(mustSucceed(ignored2 -> { final RestoreSnapshotRequestBuilder restoreSnapshotRequestBuilder = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, snapshotInfo.repository(), snapshotInfo.snapshotId().getName() ); @@ -711,7 +712,12 @@ private void startCloner() { client.admin() .cluster() - .prepareCloneSnapshot(trackedSnapshot.trackedRepository.repositoryName, trackedSnapshot.snapshotName, cloneName) + .prepareCloneSnapshot( + TEST_REQUEST_TIMEOUT, + trackedSnapshot.trackedRepository.repositoryName, + trackedSnapshot.snapshotName, + cloneName + ) .setIndices(indexNames.toArray(new String[0])) .execute(mustSucceed(acknowledgedResponse -> { Releasables.close(releaseAll); @@ -759,7 +765,7 @@ private void startSnapshotDeleter() { client.admin() .cluster() - .prepareDeleteSnapshot(targetRepository.repositoryName, snapshotNames.toArray(new String[0])) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, targetRepository.repositoryName, snapshotNames.toArray(new String[0])) .execute(mustSucceed(acknowledgedResponse -> { assertTrue(acknowledgedResponse.isAcknowledged()); for (String snapshotName : 
snapshotNames) { @@ -828,7 +834,7 @@ private void startCleaner() { logger.info("--> starting cleanup of [{}]", trackedRepository.repositoryName); client.admin() .cluster() - .prepareCleanupRepository(trackedRepository.repositoryName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .execute(mustSucceed(cleanupRepositoryResponse -> { final RepositoryCleanupResult result = cleanupRepositoryResponse.result(); if (result.bytes() > 0L || result.blobs() > 0L) { @@ -838,7 +844,7 @@ private void startCleaner() { // concurrent operations on the repository. client.admin() .cluster() - .prepareCleanupRepository(trackedRepository.repositoryName) + .prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .execute(mustSucceed(secondCleanupRepositoryResponse -> { final RepositoryCleanupResult secondCleanupResult = secondCleanupRepositoryResponse.result(); if (secondCleanupResult.blobs() == 1) { @@ -933,6 +939,7 @@ private void startSnapshotter() { ); final CreateSnapshotRequestBuilder createSnapshotRequestBuilder = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName ); @@ -1026,6 +1033,7 @@ private void startPartialSnapshotter() { ); final CreateSnapshotRequestBuilder createSnapshotRequestBuilder = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName ).setPartial(true); @@ -1046,7 +1054,7 @@ private void startPartialSnapshotter() { final DeleteSnapshotRequestBuilder deleteSnapshotRequestBuilder = abortClient.admin() .cluster() - .prepareDeleteSnapshot(trackedRepository.repositoryName, snapshotName); + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName, snapshotName); final Releasable abortReleasable = abortReleasables.transfer(); @@ -1119,7 +1127,7 @@ private void pollForSnapshotCompletion( mustSucceed( () -> client.admin() .cluster() - 
.prepareGetSnapshots(repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repositoryName) .setCurrentSnapshot() .execute(mustSucceed(getSnapshotsResponse -> { if (getSnapshotsResponse.getSnapshots() @@ -1365,7 +1373,7 @@ private void putRepositoryAndContinue(Client client, boolean nodeMightRestart, R logger.info("--> put repo [{}]", repositoryName); client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put(FsRepository.LOCATION_SETTING.getKey(), location)) .setVerify(nodeMightRestart == false) @@ -1402,11 +1410,12 @@ private void scheduleRemoveAndAdd() { final Releasable releaseAll = localReleasables.transfer(); logger.info("--> delete repo [{}]", repositoryName); - clusterAdmin().prepareDeleteRepository(repositoryName).execute(mustSucceed(acknowledgedResponse -> { - assertTrue(acknowledgedResponse.isAcknowledged()); - logger.info("--> finished delete repo [{}]", repositoryName); - putRepositoryAndContinue(client, nodeMightRestart, releaseAll); - })); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) + .execute(mustSucceed(acknowledgedResponse -> { + assertTrue(acknowledgedResponse.isAcknowledged()); + logger.info("--> finished delete repo [{}]", repositoryName); + putRepositoryAndContinue(client, nodeMightRestart, releaseAll); + })); replacingRepo = true; } finally { @@ -1708,7 +1717,7 @@ void getSnapshotInfo(Client client, ActionListener listener) { ); client.admin() .cluster() - .prepareGetSnapshots(trackedRepository.repositoryName) + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, trackedRepository.repositoryName) .setSnapshots(snapshotName) .execute(mustSucceed(getSnapshotsResponse -> { assertThat(getSnapshotsResponse.getSnapshots(), hasSize(1)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java index 4d2d310955a3d..c1b3203b15666 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotThrottlingIT.java @@ -48,12 +48,11 @@ private Tuple testThrottledRepository(String maxSnapshotBytesPerSec, .put("max_restore_bytes_per_sec", maxRestoreBytesPerSec) ); createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx")); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap") - .setRenamePattern("test-") - .setRenameReplacement("test2-") - .setWaitForCompletion(true) - .execute() - .actionGet(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + "test-repo", + "test-snap" + ).setRenamePattern("test-").setRenameReplacement("test2-").setWaitForCompletion(true).execute().actionGet(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); assertDocCount("test-idx", 50L); long snapshotPause = 0L; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java index 854d5f39ddaad..6bdb70bfba3e2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java @@ -118,7 +118,7 @@ public void testNoDoubleFinalization() throws Exception { .equals(Set.of(SnapshotsInProgress.ShardState.QUEUED, SnapshotsInProgress.ShardState.MISSING)) ); }); - clusterAdmin().prepareCreateSnapshot(repoName, "snap-2") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, 
"snap-2") .setIndices("index-2", "index-3") .setPartial(true) .setWaitForCompletion(false) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index e68a60201931a..ca9fdc1284e83 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -124,7 +124,9 @@ public void testDeleteSnapshotWhenNotWaitingForCompletion() throws Exception { SubscribableListener snapshotDeletionListener = createSnapshotDeletionListener("test-repo"); repository.blockOnDataFiles(); try { - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snapshot").setWaitForCompletion(false).execute(listener); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") + .setWaitForCompletion(false) + .execute(listener); // The request will complete as soon as the deletion is scheduled safeGet(listener); // The deletion won't complete until the block is removed @@ -144,7 +146,9 @@ public void testDeleteSnapshotWhenWaitingForCompletion() throws Exception { SubscribableListener snapshotDeletionListener = createSnapshotDeletionListener("test-repo"); repository.blockOnDataFiles(); try { - clusterAdmin().prepareDeleteSnapshot("test-repo", "test-snapshot").setWaitForCompletion(true).execute(requestCompleteListener); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snapshot") + .setWaitForCompletion(true) + .execute(requestCompleteListener); // Neither the request nor the deletion will complete until we remove the block assertFalse(requestCompleteListener.isDone()); assertFalse(snapshotDeletionListener.isDone()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index 058d5af7d9c85..706ceaad7905c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -83,10 +83,11 @@ public void testRestoreSystemIndicesAsGlobalState() { assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore snapshot with global state, without closing the system index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify only the original document is restored @@ -102,15 +103,15 @@ public void testSnapshotWithoutGlobalState() { indexDoc("not-a-system-index", "1", "purpose", "non system index doc"); // run a snapshot without global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(false) .get(); assertSnapshotSuccess(createSnapshotResponse); // check snapshot info for for which - clusterAdmin().prepareGetRepositories(REPO_NAME).get(); - Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, REPO_NAME).get(); + Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() 
@@ -132,7 +133,7 @@ public void testSnapshotByFeature() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot by feature - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setIncludeGlobalState(true) .setWaitForCompletion(true) .setFeatureStates(SystemIndexTestPlugin.class.getSimpleName(), AnotherSystemIndexTestPlugin.class.getSimpleName()) @@ -148,10 +149,11 @@ public void testSnapshotByFeature() { assertThat(getDocCount(AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore indices as global state without closing the index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify only the original document is restored @@ -175,7 +177,7 @@ public void testDefaultRestoreOnlyRegularIndices() { refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -184,7 +186,7 @@ public void testDefaultRestoreOnlyRegularIndices() { // Delete the regular index so we can restore it 
assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreResponse.getRestoreInfo().totalShards(), greaterThan(0)); @@ -207,7 +209,7 @@ public void testRestoreByFeature() { refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME, AnotherSystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -225,10 +227,11 @@ public void testRestoreByFeature() { assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); // restore indices by feature - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setFeatureStates("SystemIndexTestPlugin") - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setFeatureStates("SystemIndexTestPlugin").get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the restored system index has only one document @@ -253,14 +256,14 @@ public void testSnapshotAndRestoreAssociatedIndices() { refresh(regularIndex, AssociatedIndicesTestPlugin.SYSTEM_INDEX_NAME, AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME); // snapshot - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + 
CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName()) .setWaitForCompletion(true) .get(); assertSnapshotSuccess(createSnapshotResponse); // verify the correctness of the snapshot - Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() @@ -282,7 +285,11 @@ public void testSnapshotAndRestoreAssociatedIndices() { assertAcked(indicesAdmin().prepareDelete(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME).get()); // restore the feature state and its associated index - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ) .setIndices(AssociatedIndicesTestPlugin.ASSOCIATED_INDEX_NAME) .setWaitForCompletion(true) .setFeatureStates(AssociatedIndicesTestPlugin.class.getSimpleName()) @@ -303,7 +310,7 @@ public void testRestoreFeatureNotInSnapshot() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -312,7 +319,7 @@ public void testRestoreFeatureNotInSnapshot() { final String fakeFeatureStateName = "NonExistentTestPlugin"; SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, 
"test-snap") .setWaitForCompletion(true) .setFeatureStates("SystemIndexTestPlugin", fakeFeatureStateName) ); @@ -331,7 +338,7 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) @@ -345,7 +352,7 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { ); // And create a successful snapshot so we don't upset the test framework - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -362,7 +369,7 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -374,7 +381,7 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) ); @@ -398,7 +405,7 @@ public void 
testSystemIndicesCannotBeRenamed() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -407,7 +414,7 @@ public void testSystemIndicesCannotBeRenamed() { assertAcked(indicesAdmin().prepareDelete(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, nonSystemIndex).get()); // Restore using a rename pattern that matches both the regular and the system index - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(true) .setRenamePattern(".test-(.+)") @@ -433,7 +440,7 @@ public void testRestoreSystemIndicesAsGlobalStateWithDefaultFeatureStateList() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -446,10 +453,11 @@ public void testRestoreSystemIndicesAsGlobalStateWithDefaultFeatureStateList() { assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore indices as global state a null list of feature states - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + 
"test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the system index is destroyed @@ -468,7 +476,7 @@ public void testRestoreSystemIndicesAsGlobalStateWithNoFeatureStates() { refresh(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, regularIndex); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -482,11 +490,11 @@ public void testRestoreSystemIndicesAsGlobalStateWithNoFeatureStates() { assertThat(getDocCount(SystemIndexTestPlugin.SYSTEM_INDEX_NAME), equalTo(2L)); // restore with global state and all indices but explicitly no feature states. - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .setFeatureStates(new String[] { randomFrom("none", "NONE") }) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setWaitForCompletion(true).setRestoreGlobalState(true).setFeatureStates(new String[] { randomFrom("none", "NONE") }).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // verify that the system index still has the updated document, i.e. 
has not been restored @@ -516,7 +524,7 @@ public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() { indexDoc(regularIndex, "1", "purpose", "pre-snapshot doc"); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -535,11 +543,11 @@ public void testAllSystemIndicesAreRemovedWhenThatFeatureStateIsRestored() { assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex)); // restore the snapshot - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setFeatureStates("SystemIndexTestPlugin") - .setWaitForCompletion(true) - .setRestoreGlobalState(true) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(true).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // The index we created after the snapshot should be gone @@ -567,7 +575,7 @@ public void testSystemIndexAliasesAreAlwaysRestored() { assertAcked(indicesAdmin().prepareAliases().addAlias(systemIndexName, systemIndexAlias).addAlias(regularIndex, regularAlias).get()); // run a snapshot including global state - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -577,12 +585,11 @@ public void testSystemIndexAliasesAreAlwaysRestored() { 
assertAcked(cluster().client().admin().indices().prepareDelete(regularIndex, systemIndexName)); // Now restore the snapshot with no aliases - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") - .setFeatureStates("SystemIndexTestPlugin") - .setWaitForCompletion(true) - .setRestoreGlobalState(false) - .setIncludeAliases(false) - .get(); + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + "test-snap" + ).setFeatureStates("SystemIndexTestPlugin").setWaitForCompletion(true).setRestoreGlobalState(false).setIncludeAliases(false).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); // The regular index should exist @@ -608,7 +615,7 @@ public void testNoneFeatureStateMustBeAlone() { // run a snapshot including global state IllegalArgumentException createEx = expectThrows( IllegalArgumentException.class, - clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none", "AnotherSystemIndexTestPlugin") @@ -622,7 +629,7 @@ public void testNoneFeatureStateMustBeAlone() { ); // create a successful snapshot with global state/all features - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -630,7 +637,7 @@ public void testNoneFeatureStateMustBeAlone() { SnapshotRestoreException restoreEx = expectThrows( SnapshotRestoreException.class, - clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + 
clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none") @@ -659,7 +666,7 @@ public void testNoneFeatureStateOnCreation() { indexDoc(SystemIndexTestPlugin.SYSTEM_INDEX_NAME, "1", "purpose", "pre-snapshot doc"); refresh(regularIndex, SystemIndexTestPlugin.SYSTEM_INDEX_NAME); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIncludeGlobalState(true) .setFeatureStates(randomFrom("none", "NONE")) @@ -667,7 +674,7 @@ public void testNoneFeatureStateOnCreation() { assertSnapshotSuccess(createSnapshotResponse); // Verify that the system index was not included - Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(REPO_NAME) + Set snapshottedIndices = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .get() .getSnapshots() .stream() @@ -704,18 +711,18 @@ public void testPartialSnapshotsOfSystemIndexRemovesFeatureState() throws Except // Start a snapshot and wait for it to hit the block, then kill the master to force a failover final String partialSnapName = "test-partial-snap"; - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, partialSnapName) - .setIncludeGlobalState(true) - .setWaitForCompletion(false) - .setPartial(true) - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + partialSnapName + ).setIncludeGlobalState(true).setWaitForCompletion(false).setPartial(true).get(); assertThat(createSnapshotResponse.status(), equalTo(RestStatus.ACCEPTED)); waitForBlock(internalCluster().getMasterName(), REPO_NAME); internalCluster().stopCurrentMasterNode(); // 
Now get the snapshot and do our checks assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO_NAME) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME) .setSnapshots(partialSnapName) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -761,11 +768,11 @@ public void testParallelIndexDeleteRemovesFeatureState() throws Exception { // Start a snapshot - need to do this async because some blocks will block this call logger.info("--> Blocked repo, starting snapshot..."); final String partialSnapName = "test-partial-snap"; - ActionFuture createSnapshotFuture = clusterAdmin().prepareCreateSnapshot(REPO_NAME, partialSnapName) - .setIncludeGlobalState(true) - .setWaitForCompletion(true) - .setPartial(true) - .execute(); + ActionFuture createSnapshotFuture = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + REPO_NAME, + partialSnapName + ).setIncludeGlobalState(true).setWaitForCompletion(true).setPartial(true).execute(); logger.info("--> Started snapshot, waiting for block..."); waitForBlock(dataNodes.get(1), REPO_NAME); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index ec02b8a45cd42..0f9c77e810924 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -196,6 +196,7 @@ static TransportVersion def(int id) { public static final TransportVersion VERSION_SUPPORTING_SPARSE_VECTOR_STATS = def(8_687_00_0); public static final TransportVersion ML_AD_OUTPUT_MEMORY_ALLOCATOR_FIELD = def(8_688_00_0); public static final TransportVersion FAILURE_STORE_LAZY_CREATION = def(8_689_00_0); + public static final TransportVersion SNAPSHOT_REQUEST_TIMEOUTS = def(8_690_00_0); /* * STOP! READ THIS FIRST! 
No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index d0a71d8a94f58..e4615b28af749 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -7,10 +7,12 @@ */ package org.elasticsearch.action.admin.cluster.repositories.cleanup; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -20,18 +22,34 @@ public class CleanupRepositoryRequest extends AcknowledgedRequest { - public CleanupRepositoryRequestBuilder(ElasticsearchClient client, String repository) { - super(client, TransportCleanupRepositoryAction.TYPE, new CleanupRepositoryRequest(repository)); + public CleanupRepositoryRequestBuilder( + ElasticsearchClient client, + TimeValue masterNodeTimeout, + TimeValue ackTimeout, + String repository + ) { + super(client, TransportCleanupRepositoryAction.TYPE, new CleanupRepositoryRequest(masterNodeTimeout, ackTimeout, repository)); } public CleanupRepositoryRequestBuilder setName(String repository) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index 4892efaf5ae1f..237e241c8900f 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -80,7 +80,7 @@ public TransportCleanupRepositoryAction( clusterService, threadPool, actionFilters, - CleanupRepositoryRequest::new, + CleanupRepositoryRequest::readFrom, indexNameExpressionResolver, CleanupRepositoryResponse::new, EsExecutors.DIRECT_EXECUTOR_SERVICE diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 17543ba44ae14..cf2317fc143e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -31,17 +32,13 @@ public DeleteRepositoryRequest(StreamInput in) throws IOException { name = in.readString(); } - public DeleteRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); - } - /** * Constructs a new unregister repository request with the provided name. 
* * @param name name of the repository */ - public DeleteRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public DeleteRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 6accb02418df8..22983504e33a2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; /** * Builder for unregister repository request @@ -23,8 +24,8 @@ public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder< /** * Constructs unregister repository request builder with specified repository name */ - public DeleteRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(name)); + public DeleteRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 6d7a51420a34b..c8670c40f6aed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -23,11 +23,7 @@ */ public class GetRepositoriesRequest extends MasterNodeReadRequest { - private String[] repositories = Strings.EMPTY_ARRAY; - - public GetRepositoriesRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - } + private String[] repositories; /** * Constructs a new get repositories request with a list of repositories. @@ -35,10 +31,10 @@ public GetRepositoriesRequest() { * If the list of repositories is empty or it contains a single element "_all", all registered repositories * are returned. 
* - * @param repositories list of repositories + * @param repositories list of repositories */ - public GetRepositoriesRequest(String[] repositories) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetRepositoriesRequest(TimeValue masterNodeTimeout, String[] repositories) { + super(masterNodeTimeout); this.repositories = repositories; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index f9d0c429c2ee8..04a7655a60e4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; +import org.elasticsearch.core.TimeValue; /** * Get repository request builder @@ -23,8 +24,8 @@ public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationReques /** * Creates new get repository request builder */ - public GetRepositoriesRequestBuilder(ElasticsearchClient client, String... repositories) { - super(client, GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(repositories)); + public GetRepositoriesRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String... 
repositories) { + super(client, GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(masterNodeTimeout, repositories)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 9cee77969eb9a..cda1df3dc1c2c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -47,15 +48,15 @@ public PutRepositoryRequest(StreamInput in) throws IOException { verify = in.readBoolean(); } - public PutRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } /** * Constructs a new put repository request with the provided name. 
*/ - public PutRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public PutRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + this(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 86ed38c2ddad9..78ad0dbdfa999 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentType; import java.util.Map; @@ -27,8 +28,8 @@ public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder< /** * Constructs register repository request for the repository with a given name */ - public PutRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, TransportPutRepositoryAction.TYPE, new PutRepositoryRequest(name)); + public PutRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, TransportPutRepositoryAction.TYPE, new PutRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java index 59f254cf3636a..d3dc7c916f066 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/reservedstate/ReservedRepositoryAction.java @@ -82,7 +82,7 @@ public TransformState transform(Object source, TransformState prevState) throws toDelete.removeAll(entities); for (var repositoryToDelete : toDelete) { - var task = new RepositoriesService.UnregisterRepositoryTask(repositoryToDelete); + var task = new RepositoriesService.UnregisterRepositoryTask(DUMMY_TIMEOUT, repositoryToDelete); state = task.execute(state); } @@ -97,7 +97,7 @@ public List fromXContent(XContentParser parser) throws IOE Map source = parser.map(); for (var entry : source.entrySet()) { - PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(entry.getKey()); + PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(DUMMY_TIMEOUT, DUMMY_TIMEOUT, entry.getKey()); @SuppressWarnings("unchecked") Map content = (Map) entry.getValue(); try (XContentParser repoParser = mapToXContentParser(XContentParserConfiguration.EMPTY, content)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 971e1af5ea1bd..0876e48f929c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -29,17 +30,17 @@ public VerifyRepositoryRequest(StreamInput in) throws IOException 
{ name = in.readString(); } - public VerifyRepositoryRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public VerifyRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) { + super(masterNodeTimeout, ackTimeout); } /** * Constructs a new unregister repository request with the provided name. * - * @param name name of the repository + * @param name name of the repository */ - public VerifyRepositoryRequest(String name) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT); + public VerifyRepositoryRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + this(masterNodeTimeout, ackTimeout); this.name = name; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 798fad15734ed..d756ca93133c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; /** * Builder for verify repository request @@ -22,8 +23,8 @@ public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBu /** * Constructs unregister repository request builder with specified repository name */ - public VerifyRepositoryRequestBuilder(ElasticsearchClient client, String name) { - super(client, VerifyRepositoryAction.INSTANCE, new VerifyRepositoryRequest(name)); + public VerifyRepositoryRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + super(client, 
VerifyRepositoryAction.INSTANCE, new VerifyRepositoryRequest(masterNodeTimeout, ackTimeout, name)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 2c7f1a703b64d..dafd35f09f12f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -52,8 +53,8 @@ public CloneSnapshotRequest(StreamInput in) throws IOException { * @param target target snapshot name * @param indices indices to clone from source to target */ - public CloneSnapshotRequest(String repository, String source, String target, String[] indices) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public CloneSnapshotRequest(TimeValue masterNodeTimeout, String repository, String source, String target, String[] indices) { + super(masterNodeTimeout); this.repository = repository; this.source = source; this.target = target; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 818f0fadf92ef..a2726505dd834 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -13,14 +13,25 @@ import 
org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.core.TimeValue; public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, CloneSnapshotRequestBuilder> { - public CloneSnapshotRequestBuilder(ElasticsearchClient client, String repository, String source, String target) { - super(client, TransportCloneSnapshotAction.TYPE, new CloneSnapshotRequest(repository, source, target, Strings.EMPTY_ARRAY)); + public CloneSnapshotRequestBuilder( + ElasticsearchClient client, + TimeValue masterNodeTimeout, + String repository, + String source, + String target + ) { + super( + client, + TransportCloneSnapshotAction.TYPE, + new CloneSnapshotRequest(masterNodeTimeout, repository, source, target, Strings.EMPTY_ARRAY) + ); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index c2fd49eb91a42..2c460319e3d86 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -81,8 +82,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest userMetadata; - public CreateSnapshotRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public 
CreateSnapshotRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } /** @@ -91,8 +92,8 @@ public CreateSnapshotRequest() { * @param repository repository name * @param snapshot snapshot name */ - public CreateSnapshotRequest(String repository, String snapshot) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public CreateSnapshotRequest(TimeValue masterNodeTimeout, String repository, String snapshot) { + this(masterNodeTimeout); this.snapshot = snapshot; this.repository = repository; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 7f093b577fd57..983bb6e5d3a7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import java.util.Map; @@ -26,8 +27,8 @@ public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuil /** * Constructs a new create snapshot request builder with specified repository and snapshot names */ - public CreateSnapshotRequestBuilder(ElasticsearchClient client, String repository, String snapshot) { - super(client, TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(repository, snapshot)); + public CreateSnapshotRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String repository, String snapshot) { + super(client, TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(masterNodeTimeout, repository, snapshot)); } /** diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index 2356087d64e41..771bab8d4d6b2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; import java.util.Arrays; @@ -39,8 +40,8 @@ public class DeleteSnapshotRequest extends MasterNodeRequest { - public GetSnapshottableFeaturesRequest() { - - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshottableFeaturesRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } public GetSnapshottableFeaturesRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java index 0dcd5762b0b08..ca867625272f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import java.io.IOException; @@ -22,8 +23,8 @@ public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOExcep return new 
ResetFeatureStateRequest(in); } - public ResetFeatureStateRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public ResetFeatureStateRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } private ResetFeatureStateRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 2aa64fcf0bad6..8ef828d07d8b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -76,8 +77,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest private boolean includeIndexNames = true; - public GetSnapshotsRequest() { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshotsRequest(TimeValue masterNodeTimeout) { + super(masterNodeTimeout); } /** @@ -86,9 +87,8 @@ public GetSnapshotsRequest() { * @param repositories repository names * @param snapshots list of snapshots */ - public GetSnapshotsRequest(String[] repositories, String[] snapshots) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); - this.repositories = repositories; + public GetSnapshotsRequest(TimeValue masterNodeTimeout, String[] repositories, String[] snapshots) { + this(masterNodeTimeout, repositories); this.snapshots = snapshots; } @@ -97,8 +97,8 @@ public GetSnapshotsRequest(String[] repositories, String[] snapshots) { * * @param repositories repository names */ - public 
GetSnapshotsRequest(String... repositories) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + public GetSnapshotsRequest(TimeValue masterNodeTimeout, String... repositories) { + this(masterNodeTimeout); this.repositories = repositories; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index f3ef2fa0bda1e..54583a3ba23da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -12,6 +12,7 @@ import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.common.util.ArrayUtils; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.sort.SortOrder; /** @@ -25,8 +26,8 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde /** * Constructs the new get snapshot request with specified repositories */ - public GetSnapshotsRequestBuilder(ElasticsearchClient client, String... repositories) { - super(client, TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(repositories)); + public GetSnapshotsRequestBuilder(ElasticsearchClient client, TimeValue masterNodeTimeout, String... 
repositories) { + super(client, TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(masterNodeTimeout, repositories)); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java index 7a7cc0c304556..96bedfdbfd108 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -28,8 +29,8 @@ public class GetShardSnapshotRequest extends MasterNodeRequest repositories; private final ShardId shardId; - GetShardSnapshotRequest(List repositories, ShardId shardId) { - super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT); + GetShardSnapshotRequest(TimeValue masterNodeTimeout, List repositories, ShardId shardId) { + super(masterNodeTimeout); assert repositories.isEmpty() == false; assert repositories.stream().noneMatch(Objects::isNull); assert repositories.size() == 1 || repositories.stream().noneMatch(repo -> repo.equals(ALL_REPOSITORIES)); @@ -50,11 +51,15 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); } - public static GetShardSnapshotRequest latestSnapshotInAllRepositories(ShardId shardId) { - return new GetShardSnapshotRequest(Collections.singletonList(ALL_REPOSITORIES), shardId); + public static GetShardSnapshotRequest latestSnapshotInAllRepositories(TimeValue masterNodeTimeout, ShardId shardId) { + return new GetShardSnapshotRequest(masterNodeTimeout, Collections.singletonList(ALL_REPOSITORIES), 
shardId); } - public static GetShardSnapshotRequest latestSnapshotInRepositories(ShardId shardId, List repositories) { + public static GetShardSnapshotRequest latestSnapshotInRepositories( + TimeValue masterNodeTimeout, + ShardId shardId, + List repositories + ) { if (repositories.isEmpty()) { throw new IllegalArgumentException("Expected at least 1 repository but got none"); } @@ -62,7 +67,7 @@ public static GetShardSnapshotRequest latestSnapshotInRepositories(ShardId shard if (repositories.stream().anyMatch(Objects::isNull)) { throw new NullPointerException("null values are not allowed in the repository list"); } - return new GetShardSnapshotRequest(repositories, shardId); + return new GetShardSnapshotRequest(masterNodeTimeout, repositories, shardId); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 674fe117410e5..f0d47813dad77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -63,8 +64,13 @@ public class RestoreSnapshotRequest extends MasterNodeRequest { + private static final Logger logger = LogManager.getLogger(LazyRolloverAction.class); + public static final NodeFeature DATA_STREAM_LAZY_ROLLOVER = new NodeFeature("data_stream.rollover.lazy"); public static final LazyRolloverAction INSTANCE = new LazyRolloverAction(); @@ -50,6 +67,8 @@ 
public String name() { public static final class TransportLazyRolloverAction extends TransportRolloverAction { + private final MasterServiceTaskQueue lazyRolloverTaskQueue; + @Inject public TransportLazyRolloverAction( TransportService transportService, @@ -76,6 +95,11 @@ public TransportLazyRolloverAction( metadataDataStreamsService, dataStreamAutoShardingService ); + this.lazyRolloverTaskQueue = clusterService.createTaskQueue( + "lazy-rollover", + Priority.NORMAL, + new LazyRolloverExecutor(clusterService, allocationService, rolloverService, threadPool) + ); } @Override @@ -93,13 +117,20 @@ protected void masterOperation( : "The auto rollover action does not expect any other parameters in the request apart from the data stream name"; Metadata metadata = clusterState.metadata(); + DataStream dataStream = metadata.dataStreams().get(rolloverRequest.getRolloverTarget()); + // Skip submitting the task if we detect that the lazy rollover has been already executed. + if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { + DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + listener.onResponse(noopLazyRolloverResponse(targetIndices)); + return; + } // We evaluate the names of the source index as well as what our newly created index would be. 
final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, rolloverRequest.getRolloverTarget(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); final String trialSourceIndexName = trialRolloverNames.sourceName(); final String trialRolloverIndexName = trialRolloverNames.rolloverName(); @@ -107,24 +138,174 @@ protected void masterOperation( assert metadata.dataStreams().containsKey(rolloverRequest.getRolloverTarget()) : "Auto-rollover applies only to data streams"; - final RolloverResponse trialRolloverResponse = new RolloverResponse( - trialSourceIndexName, - trialRolloverIndexName, - Map.of(), - false, - false, - false, - false, - false - ); - String source = "lazy_rollover source [" + trialSourceIndexName + "] to target [" + trialRolloverIndexName + "]"; // We create a new rollover request to ensure that it doesn't contain any other parameters apart from the data stream name // This will provide a more resilient user experience var newRolloverRequest = new RolloverRequest(rolloverRequest.getRolloverTarget(), null); newRolloverRequest.setIndicesOptions(rolloverRequest.indicesOptions()); - RolloverTask rolloverTask = new RolloverTask(newRolloverRequest, null, trialRolloverResponse, null, listener); - submitRolloverTask(rolloverRequest, source, rolloverTask); + LazyRolloverTask rolloverTask = new LazyRolloverTask(newRolloverRequest, listener); + lazyRolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); + } + } + + /** + * A lazy rollover task holds the rollover request and the listener. 
+ */ + record LazyRolloverTask(RolloverRequest rolloverRequest, ActionListener listener) + implements + ClusterStateTaskListener { + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); } } + + /** + * Performs a lazy rollover when required and notifies the listener. Due to the nature of the lazy rollover we are able + * to perform certain optimisations like identifying duplicate requests and executing them once. This is an optimisation + * that can work since we do not take into consideration any stats or auto-sharding conditions here. + */ + record LazyRolloverExecutor( + ClusterService clusterService, + AllocationService allocationService, + MetadataRolloverService rolloverService, + ThreadPool threadPool + ) implements ClusterStateTaskExecutor { + + @Override + public ClusterState execute(BatchExecutionContext batchExecutionContext) { + final var listener = new AllocationActionMultiListener(threadPool.getThreadContext()); + final var results = new ArrayList(batchExecutionContext.taskContexts().size()); + var state = batchExecutionContext.initialState(); + Map>> groupedRequests = new HashMap<>(); + for (final var taskContext : batchExecutionContext.taskContexts()) { + groupedRequests.computeIfAbsent(taskContext.getTask().rolloverRequest(), ignored -> new ArrayList<>()).add(taskContext); + } + for (final var entry : groupedRequests.entrySet()) { + List> rolloverTaskContexts = entry.getValue(); + try { + RolloverRequest rolloverRequest = entry.getKey(); + state = executeTask(state, rolloverRequest, results, rolloverTaskContexts, listener); + } catch (Exception e) { + rolloverTaskContexts.forEach(taskContext -> taskContext.onFailure(e)); + } finally { + rolloverTaskContexts.forEach(taskContext -> taskContext.captureResponseHeaders().close()); + } + } + + if (state != batchExecutionContext.initialState()) { + var reason = new StringBuilder(); + Strings.collectionToDelimitedStringWithLimit( + (Iterable) () -> Iterators.map(results.iterator(), t 
-> t.sourceIndexName() + "->" + t.rolloverIndexName()), + ",", + "lazy bulk rollover [", + "]", + 1024, + reason + ); + try (var ignored = batchExecutionContext.dropHeadersContext()) { + state = allocationService.reroute(state, reason.toString(), listener.reroute()); + } + } else { + listener.noRerouteNeeded(); + } + return state; + } + + public ClusterState executeTask( + ClusterState currentState, + RolloverRequest rolloverRequest, + List results, + List> rolloverTaskContexts, + AllocationActionMultiListener allocationActionMultiListener + ) throws Exception { + + // If the data stream has been rolled over since it was marked for lazy rollover, this operation is a noop + final DataStream dataStream = currentState.metadata().dataStreams().get(rolloverRequest.getRolloverTarget()); + assert dataStream != null; + + if (isLazyRolloverNeeded(dataStream, rolloverRequest.targetsFailureStore()) == false) { + final DataStream.DataStreamIndices targetIndices = dataStream.getDataStreamIndices(rolloverRequest.targetsFailureStore()); + var noopResponse = noopLazyRolloverResponse(targetIndices); + notifyAllListeners(rolloverTaskContexts, context -> context.getTask().listener.onResponse(noopResponse)); + return currentState; + } + + // Perform the actual rollover + final var rolloverResult = rolloverService.rolloverClusterState( + currentState, + rolloverRequest.getRolloverTarget(), + rolloverRequest.getNewIndexName(), + rolloverRequest.getCreateIndexRequest(), + List.of(), + Instant.now(), + false, + false, + null, + null, + rolloverRequest.targetsFailureStore() + ); + results.add(rolloverResult); + logger.trace("lazy rollover result [{}]", rolloverResult); + + final var rolloverIndexName = rolloverResult.rolloverIndexName(); + final var sourceIndexName = rolloverResult.sourceIndexName(); + + final var waitForActiveShardsTimeout = rolloverRequest.masterNodeTimeout().millis() < 0 + ? 
null + : rolloverRequest.masterNodeTimeout(); + + notifyAllListeners(rolloverTaskContexts, context -> { + // Now assuming we have a new state and the name of the rolled over index, we need to wait for the configured number of + // active shards, as well as return the names of the indices that were rolled/created + ActiveShardsObserver.waitForActiveShards( + clusterService, + new String[] { rolloverIndexName }, + rolloverRequest.getCreateIndexRequest().waitForActiveShards(), + waitForActiveShardsTimeout, + allocationActionMultiListener.delay(context.getTask().listener()) + .map( + isShardsAcknowledged -> new RolloverResponse( + // Note that we use the actual rollover result for these, because even though we're single threaded, + // it's possible for the rollover names generated before the actual rollover to be different due to + // things like date resolution + sourceIndexName, + rolloverIndexName, + Map.of(), + false, + true, + true, + isShardsAcknowledged, + false + ) + ) + ); + }); + + // Return the new rollover cluster state, which includes the changes that create the new index + return rolloverResult.clusterState(); + } + } + + /** + * A lazy rollover is only needed if the data stream is marked to rollover on write or if it targets the failure store + * and the failure store is empty. 
+ */ + private static boolean isLazyRolloverNeeded(DataStream dataStream, boolean failureStore) { + DataStream.DataStreamIndices indices = dataStream.getDataStreamIndices(failureStore); + return indices.isRolloverOnWrite() || (failureStore && indices.getIndices().isEmpty()); + } + + private static void notifyAllListeners( + List> taskContexts, + Consumer> onPublicationSuccess + ) { + taskContexts.forEach(context -> context.success(() -> onPublicationSuccess.accept(context))); + } + + private static RolloverResponse noopLazyRolloverResponse(DataStream.DataStreamIndices indices) { + String latestWriteIndex = indices.getWriteIndex().getName(); + return new RolloverResponse(latestWriteIndex, latestWriteIndex, Map.of(), false, false, true, true, false); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index dea772cc893f2..6302b1c9ef9fb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.RestApiVersion; @@ -182,6 +183,13 @@ public IndicesOptions indicesOptions() { return indicesOptions; } + /** + * @return true if the rollover request targets the failure store, false otherwise.
+ */ + public boolean targetsFailureStore() { + return DataStream.isFailureStoreFeatureFlagEnabled() && indicesOptions.failureStoreOptions().includeFailureIndices(); + } + public void setIndicesOptions(IndicesOptions indicesOptions) { this.indicesOptions = indicesOptions; } @@ -192,14 +200,14 @@ public boolean includeDataStreams() { } /** - * Sets the rollover target to rollover to another index + * Sets the rollover target to roll over to another index */ public void setRolloverTarget(String rolloverTarget) { this.rolloverTarget = rolloverTarget; } /** - * Sets the alias to rollover to another index + * Sets the alias to roll over to another index */ public void setNewIndexName(String newIndexName) { this.newIndexName = newIndexName; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 34da6795cd5f2..d76cfedd279b5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -169,7 +169,7 @@ protected void masterOperation( assert task instanceof CancellableTask; Metadata metadata = clusterState.metadata(); // We evaluate the names of the index for which we should evaluate conditions, as well as what our newly created index *would* be. 
- boolean targetFailureStore = rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices(); + boolean targetFailureStore = rolloverRequest.targetsFailureStore(); final MetadataRolloverService.NameResolution trialRolloverNames = MetadataRolloverService.resolveRolloverNames( clusterState, rolloverRequest.getRolloverTarget(), @@ -335,7 +335,7 @@ protected void masterOperation( rolloverAutoSharding, delegate ); - submitRolloverTask(rolloverRequest, source, rolloverTask); + rolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } else { // conditions not met delegate.onResponse(trialRolloverResponse); @@ -375,10 +375,6 @@ private void initializeFailureStore( String source = "initialize_failure_store with index [" + trialRolloverIndexName + "]"; RolloverTask rolloverTask = new RolloverTask(rolloverRequest, null, trialRolloverResponse, null, listener); - submitRolloverTask(rolloverRequest, source, rolloverTask); - } - - void submitRolloverTask(RolloverRequest rolloverRequest, String source, RolloverTask rolloverTask) { rolloverTaskQueue.submitTask(source, rolloverTask, rolloverRequest.masterNodeTimeout()); } @@ -501,7 +497,7 @@ public ClusterState executeTask( rolloverRequest.getRolloverTarget(), rolloverRequest.getNewIndexName(), rolloverRequest.getCreateIndexRequest(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); // Re-evaluate the conditions, now with our final source index name @@ -552,7 +548,7 @@ public ClusterState executeTask( false, sourceIndexStats, rolloverTask.autoShardingResult(), - rolloverRequest.indicesOptions().failureStoreOptions().includeFailureIndices() + rolloverRequest.targetsFailureStore() ); results.add(rolloverResult); logger.trace("rollover result [{}]", rolloverResult); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java 
b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index e0a28e635a0a3..b9f753189c077 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -214,12 +214,12 @@ private void rollOverFailureStores(Runnable runnable) { @Override public void onResponse(RolloverResponse result) { - // A successful response has rolled_over false when in the following cases: - // - A request had the parameter lazy or dry_run enabled - // - A request had conditions that were not met - // Since none of the above apply, getting a response with rolled_over false is considered a bug - // that should be caught here and inform the developer. - assert result.isRolledOver() : "An successful lazy rollover should always result in a rolled over data stream"; + logger.debug( + "Data stream failure store {} has {} over, the latest index is {}", + dataStream, + result.isRolledOver() ? "been successfully rolled" : "skipped rolling", + result.getNewIndex() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 4fc17407ae6d0..b14a63362cb9f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -534,12 +534,13 @@ private void rollOverDataStreams( @Override public void onResponse(RolloverResponse result) { - // A successful response has rolled_over false when in the following cases: - // - A request had the parameter lazy or dry_run enabled - // - A request had conditions that were not met - // Since none of the above apply, getting a response with rolled_over false is considered a bug - // that should be caught here and inform the developer. 
- assert result.isRolledOver() : "An successful lazy rollover should always result in a rolled over data stream"; + logger.debug( + "Data stream{} {} has {} over, the latest index is {}", + rolloverRequest.targetsFailureStore() ? " failure store" : "", + dataStream, + result.isRolledOver() ? "been successfully rolled" : "skipped rolling", + result.getNewIndex() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 4e42de57d08d3..f14a2f6fb5247 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -122,8 +122,11 @@ import org.elasticsearch.action.ingest.SimulatePipelineRequest; import org.elasticsearch.action.ingest.SimulatePipelineRequestBuilder; import org.elasticsearch.action.ingest.SimulatePipelineResponse; +import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; @@ -295,28 +298,46 @@ public void putRepository(PutRepositoryRequest request, ActionListener listener) { execute(TransportDeleteRepositoryAction.TYPE, request, listener); } + public DeleteRepositoryRequestBuilder prepareDeleteRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String name) { + return new DeleteRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, name); + } + + @Deprecated(forRemoval = true) // temporary compatibility shim public DeleteRepositoryRequestBuilder prepareDeleteRepository(String name) { - return new 
DeleteRepositoryRequestBuilder(this, name); + return prepareDeleteRepository( + MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, + AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, + name + ); } public void getRepositories(GetRepositoriesRequest request, ActionListener listener) { execute(GetRepositoriesAction.INSTANCE, request, listener); } - public GetRepositoriesRequestBuilder prepareGetRepositories(String... name) { - return new GetRepositoriesRequestBuilder(this, name); + public GetRepositoriesRequestBuilder prepareGetRepositories(TimeValue masterNodeTimeout, String... name) { + return new GetRepositoriesRequestBuilder(this, masterNodeTimeout, name); } - public CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository) { - return new CleanupRepositoryRequestBuilder(this, repository); + public CleanupRepositoryRequestBuilder prepareCleanupRepository(TimeValue masterNodeTimeout, TimeValue ackTimeout, String repository) { + return new CleanupRepositoryRequestBuilder(this, masterNodeTimeout, ackTimeout, repository); } public void cleanupRepository(CleanupRepositoryRequest request, ActionListener listener) { @@ -327,8 +348,8 @@ public void verifyRepository(VerifyRepositoryRequest request, ActionListener createSnapshot(CreateSnapshotRequest request) { @@ -339,12 +360,17 @@ public void createSnapshot(CreateSnapshotRequest request, ActionListener listener) { @@ -355,16 +381,26 @@ public void getSnapshots(GetSnapshotsRequest request, ActionListener listener) { execute(TransportDeleteSnapshotAction.TYPE, request, listener); } + @Deprecated(forRemoval = true) // temporary compatibility shim public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(String repository, String... 
names) { - return new DeleteSnapshotRequestBuilder(this, repository, names); + return prepareDeleteSnapshot(MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, repository, names); + } + + public DeleteSnapshotRequestBuilder prepareDeleteSnapshot(TimeValue masterNodeTimeout, String repository, String... names) { + return new DeleteSnapshotRequestBuilder(this, masterNodeTimeout, repository, names); } public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { @@ -375,20 +411,25 @@ public void restoreSnapshot(RestoreSnapshotRequest request, ActionListener listener) { execute(TransportSnapshotsStatusAction.TYPE, request, listener); } - public SnapshotsStatusRequestBuilder prepareSnapshotStatus(String repository) { - return new SnapshotsStatusRequestBuilder(this, repository); + public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout, String repository) { + return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout, repository); } - public SnapshotsStatusRequestBuilder prepareSnapshotStatus() { - return new SnapshotsStatusRequestBuilder(this); + public SnapshotsStatusRequestBuilder prepareSnapshotStatus(TimeValue masterNodeTimeout) { + return new SnapshotsStatusRequestBuilder(this, masterNodeTimeout); } public void putPipeline(PutPipelineRequest request, ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 28df2fad32cbb..9998cb55064e3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -748,24 +748,4 @@ public static XContentParser mapToXContentParser(XContentParserConfiguration con throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } } - - /** - * Drains all data available via this parser into a provided builder. 
- * Provided parser is closed as a result. - * @param parser - * @param destination - */ - public static void drainAndClose(XContentParser parser, XContentBuilder destination) throws IOException { - if (parser.isClosed()) { - throw new IllegalStateException("Can't drain a parser that is closed"); - } - - XContentParser.Token token; - do { - destination.copyCurrentStructure(parser); - token = parser.nextToken(); - } while (token != null); - - parser.close(); - } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 034e8fd0770f3..0966698277723 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -632,6 +632,24 @@ private static void parseArrayDynamic(DocumentParserContext context, String curr } Mapper objectMapperFromTemplate = DynamicFieldsBuilder.createObjectMapperFromTemplate(context, currentFieldName); if (objectMapperFromTemplate == null) { + if (context.indexSettings().isIgnoreDynamicFieldsBeyondLimit() + && context.mappingLookup().exceedsLimit(context.indexSettings().getMappingTotalFieldsLimit(), 1)) { + if (context.canAddIgnoredField()) { + try { + context.addIgnoredField( + IgnoredSourceFieldMapper.NameValue.fromContext( + context, + currentFieldName, + XContentDataHelper.encodeToken(context.parser()) + ) + ); + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse field [" + currentFieldName + " ]", e); + } + } + context.addIgnoredField(currentFieldName); + return; + } parseNonDynamicArray(context, objectMapperFromTemplate, currentFieldName, currentFieldName); } else { if (parsesArrayValue(objectMapperFromTemplate)) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 
f64511f8396ec..efbc75490550d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -10,10 +10,14 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.ByteUtils; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Tuple; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -21,6 +25,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.List; +import java.util.Map; /** @@ -145,6 +150,56 @@ static NameValue decode(Object field) { return new NameValue(name, parentOffset, value, null); } + public record MappedNameValue(NameValue nameValue, XContentType type, Map map) {} + + /** + * Parses the passed byte array as a NameValue and converts its decoded value to a map of maps that corresponds to the field-value + * subtree. There is only a single pair at the top level, with the key corresponding to the field name. If the field contains a single + * value, the map contains a single key-value pair. Otherwise, the value of the first pair will be another map etc. + * @param value encoded NameValue + * @return MappedNameValue with the parsed NameValue, the XContentType to use for serializing its contents and the field-value map. 
+ * @throws IOException + */ + public static MappedNameValue decodeAsMap(byte[] value) throws IOException { + BytesRef bytes = new BytesRef(value); + IgnoredSourceFieldMapper.NameValue nameValue = IgnoredSourceFieldMapper.decode(bytes); + XContentBuilder xContentBuilder = XContentBuilder.builder(XContentDataHelper.getXContentType(nameValue.value()).xContent()); + xContentBuilder.startObject().field(nameValue.name()); + XContentDataHelper.decodeAndWrite(xContentBuilder, nameValue.value()); + xContentBuilder.endObject(); + Tuple> result = XContentHelper.convertToMap(BytesReference.bytes(xContentBuilder), true); + return new MappedNameValue(nameValue, result.v1(), result.v2()); + } + + /** + * Clones the passed NameValue, using the passed map to produce its value. + * @param mappedNameValue containing the NameValue to clone + * @param map containing a simple field-value pair, or a deeper field-value subtree for objects and arrays with fields + * @return a byte array containing the encoding form of the cloned NameValue + * @throws IOException + */ + public static byte[] encodeFromMap(MappedNameValue mappedNameValue, Map map) throws IOException { + // The first entry is the field name, we skip to get to the value to encode. + assert map.size() == 1; + Object content = map.values().iterator().next(); + + // Check if the field contains a single value or an object. + @SuppressWarnings("unchecked") + XContentBuilder xContentBuilder = (content instanceof Map objectMap) + ? XContentBuilder.builder(mappedNameValue.type().xContent()).map((Map) objectMap) + : XContentBuilder.builder(mappedNameValue.type().xContent()).value(content); + + // Clone the NameValue with the updated value. 
+ NameValue oldNameValue = mappedNameValue.nameValue(); + IgnoredSourceFieldMapper.NameValue filteredNameValue = new IgnoredSourceFieldMapper.NameValue( + oldNameValue.name(), + oldNameValue.parentOffset(), + XContentDataHelper.encodeXContentBuilder(xContentBuilder), + oldNameValue.doc() + ); + return IgnoredSourceFieldMapper.encode(filteredNameValue); + } + // This mapper doesn't contribute to source directly as it has no access to the object structure. Instead, its contents // are loaded by SourceLoader and passed to object mappers that, in turn, write their ignore fields at the appropriate level. @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index 15caa7f5a6238..fa501a31045e7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -37,10 +37,6 @@ public static MapperBuilderContext root(boolean isSourceSynthetic, boolean isDat private final ObjectMapper.Dynamic dynamic; private final MergeReason mergeReason; - MapperBuilderContext(String path) { - this(path, false, false, false, ObjectMapper.Defaults.DYNAMIC, MergeReason.MAPPING_UPDATE); - } - MapperBuilderContext( String path, boolean isSourceSynthetic, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java index 48e04a938d2b2..da005217b1b2d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperMergeContext.java @@ -25,10 +25,6 @@ private MapperMergeContext(MapperBuilderContext mapperBuilderContext, NewFieldsB this.newFieldsBudget = newFieldsBudget; } - static MapperMergeContext root(boolean isSourceSynthetic, boolean isDataStream, long newFieldsBudget) 
{ - return root(isSourceSynthetic, isDataStream, MergeReason.MAPPING_UPDATE, newFieldsBudget); - } - /** * The root context, to be used when merging a tree of mappers */ diff --git a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java index 254a0bc9c906b..d97e03d3874ee 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/XContentDataHelper.java @@ -92,6 +92,18 @@ static void decodeAndWrite(XContentBuilder b, BytesRef r) throws IOException { } } + /** + * Returns the {@link XContentType} to use for creating an XContentBuilder to decode the passed value. + */ + public static XContentType getXContentType(BytesRef r) { + return switch ((char) r.bytes[r.offset]) { + case JSON_OBJECT_ENCODING -> XContentType.JSON; + case YAML_OBJECT_ENCODING -> XContentType.YAML; + case SMILE_OBJECT_ENCODING -> XContentType.SMILE; + default -> XContentType.CBOR; // CBOR can parse all other encoded types. + }; + } + /** * Stores the current parser structure (subtree) to an {@link XContentBuilder} and returns it, along with a * {@link DocumentParserContext} wrapping it that can be used to reparse the subtree. 
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java index 6e53b8416ebd3..c414f7c100633 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsService.java @@ -18,6 +18,7 @@ import org.apache.lucene.store.Lock; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.get.shard.GetShardSnapshotResponse; @@ -31,6 +32,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.elasticsearch.index.store.StoreFileMetadata; @@ -91,7 +93,13 @@ public void fetchLatestSnapshotsForShard(ShardId shardId, ActionListener systemLoaderURLs, PluginBundle bundle, M Set pluginUrls = transitiveUrls.get(extendedPlugin); assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; - // consistency check: extended plugins should not have duplicate codebases with each other - Set intersection = new HashSet<>(extendedPluginUrls); - intersection.retainAll(pluginUrls); - if (intersection.isEmpty() == false) { - throw new IllegalStateException( - "jar hell! 
extended plugins " + exts + " have duplicate codebases with each other: " + intersection - ); - } - // jar hell check: extended plugins (so far) do not have jar hell with each other extendedPluginUrls.addAll(pluginUrls); JarHell.checkJarHell(extendedPluginUrls, logger::debug); // consistency check: each extended plugin should not have duplicate codebases with implementation+spi of this plugin - intersection = new HashSet<>(bundle.allUrls); + Set intersection = new HashSet<>(bundle.allUrls); intersection.retainAll(pluginUrls); if (intersection.isEmpty() == false) { throw new IllegalStateException( diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 181fe6afb97d9..c63be88215655 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -495,8 +495,8 @@ public static class UnregisterRepositoryTask extends AckedClusterStateUpdateTask * Constructor used by {@link org.elasticsearch.action.admin.cluster.repositories.reservedstate.ReservedRepositoryAction} * @param name the repository name */ - public UnregisterRepositoryTask(String name) { - this(new DeleteRepositoryRequest(name), null); + public UnregisterRepositoryTask(TimeValue dummyTimeout, String name) { + this(new DeleteRepositoryRequest(dummyTimeout, dummyTimeout, name), null); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e27ba56bed974..8f55bf16c1674 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.BytesRef; import 
org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; @@ -58,8 +59,15 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.compress.DeflateCompressor; import org.elasticsearch.common.compress.NotXContentException; import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.metrics.CounterMetric; @@ -77,6 +85,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; @@ -122,16 +131,21 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import 
org.elasticsearch.xcontent.XContentType; +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.Closeable; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.io.UncheckedIOException; import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.Collection; @@ -156,6 +170,8 @@ import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.InflaterInputStream; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; @@ -1010,10 +1026,35 @@ class SnapshotsDeletion { // The overall flow of execution void runDelete(SnapshotDeleteListener listener) { + final var releasingListener = new SnapshotDeleteListener() { + @Override + public void onDone() { + try { + shardBlobsToDelete.close(); + } finally { + listener.onDone(); + } + } + + @Override + public void onRepositoryDataWritten(RepositoryData repositoryData) { + listener.onRepositoryDataWritten(repositoryData); + } + + @Override + public void onFailure(Exception e) { + try { + shardBlobsToDelete.close(); + } finally { + listener.onFailure(e); + } + + } + }; if (useShardGenerations) { - runWithUniqueShardMetadataNaming(listener); + runWithUniqueShardMetadataNaming(releasingListener); } else { - runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(listener)); + runWithLegacyNumericShardMetadataNaming(wrapWithWeakConsistencyProtection(releasingListener)); } } @@ -1088,14 +1129,15 @@ void runCleanup(ActionListener listener) { .map(IndexId::getId) .collect(Collectors.toSet()); final List staleRootBlobs = staleRootBlobs(originalRepositoryData, originalRootBlobs.keySet()); + final var releasingListener = ActionListener.releaseAfter(listener, shardBlobsToDelete); if 
(survivingIndexIds.equals(originalIndexContainers.keySet()) && staleRootBlobs.isEmpty()) { // Nothing to clean up we return - listener.onResponse(DeleteResult.ZERO); + releasingListener.onResponse(DeleteResult.ZERO); } else { // write new index-N blob to ensure concurrent operations will fail updateRepositoryData( originalRepositoryData, - listener.delegateFailureAndWrap( + releasingListener.delegateFailureAndWrap( // TODO should we pass newRepositoryData to cleanupStaleBlobs()? (l, newRepositoryData) -> cleanupUnlinkedRootAndIndicesBlobs( originalRepositoryData, @@ -1513,33 +1555,62 @@ private void logStaleRootLevelBlobs( /** * Tracks the shard-level blobs which can be deleted once all the metadata updates have completed during a snapshot deletion. */ - class ShardBlobsToDelete { + class ShardBlobsToDelete implements Releasable { /** * The result of removing a snapshot from a shard folder in the repository. * - * @param indexId Index that the snapshot was removed from + * @param indexId Repository UUID for index that the snapshot was removed from * @param shardId Shard id that the snapshot was removed from - * @param newGeneration Id of the new index-${uuid} blob that does not include the snapshot any more * @param blobsToDelete Blob names in the shard directory that have become unreferenced in the new shard generation */ - private record ShardSnapshotMetaDeleteResult( - IndexId indexId, - int shardId, - ShardGeneration newGeneration, - Collection blobsToDelete - ) {} + private record ShardSnapshotMetaDeleteResult(String indexId, int shardId, Collection blobsToDelete) { + ShardSnapshotMetaDeleteResult(StreamInput in) throws IOException { + this(in.readString(), in.readVInt(), in.readStringCollectionAsImmutableList()); + assert in.getTransportVersion().equals(TransportVersion.current()); // only used in memory on the local node + } + + void writeTo(StreamOutput out) throws IOException { + assert out.getTransportVersion().equals(TransportVersion.current()); // only 
used in memory on the local node + out.writeString(indexId); + out.writeVInt(shardId); + out.writeStringCollection(blobsToDelete); + } + } /** *

- * Shard-level results, see {@link ShardSnapshotMetaDeleteResult}. + * Shard-level results, i.e. a sequence of {@link ShardSnapshotMetaDeleteResult} objects, except serialized, concatenated, and + * compressed in order to reduce the memory footprint by about 4x when compared with a list of bare objects. This can be GiBs in + * size if we're deleting snapshots from a large repository, especially if earlier failures left behind lots of dangling blobs + * for some reason. *

*

- * Writes to this list are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read so the reads need - * no further synchronization + * Writes to this object are all synchronized (via {@link #addShardDeleteResult}), and happen-before it is read, so the reads + * need no further synchronization. *

*/ - private final List shardDeleteResults = new ArrayList<>(); + // If the size of this continues to be a problem even after compression, consider either a hard limit on its size (preferring leaked + // blobs over an OOME on the master) or else offloading it to disk or to the repository itself. + private final BytesStreamOutput shardDeleteResults = new ReleasableBytesStreamOutput(bigArrays); + + private int resultCount = 0; + + private final StreamOutput compressed = new OutputStreamStreamOutput( + new BufferedOutputStream( + new DeflaterOutputStream(Streams.flushOnCloseStream(shardDeleteResults)), + DeflateCompressor.BUFFER_SIZE + ) + ); + + private final ArrayList resources = new ArrayList<>(); + + private final ShardGenerations.Builder shardGenerationsBuilder = ShardGenerations.builder(); + + ShardBlobsToDelete() { + resources.add(compressed); + resources.add(LeakTracker.wrap((Releasable) shardDeleteResults)); + } synchronized void addShardDeleteResult( IndexId indexId, @@ -1547,23 +1618,62 @@ synchronized void addShardDeleteResult( ShardGeneration newGeneration, Collection blobsToDelete ) { - shardDeleteResults.add(new ShardSnapshotMetaDeleteResult(indexId, shardId, newGeneration, blobsToDelete)); + try { + shardGenerationsBuilder.put(indexId, shardId, newGeneration); + new ShardSnapshotMetaDeleteResult(Objects.requireNonNull(indexId.getId()), shardId, blobsToDelete).writeTo(compressed); + resultCount += 1; + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } } public ShardGenerations getUpdatedShardGenerations() { - final var builder = ShardGenerations.builder(); - for (var shardResult : shardDeleteResults) { - builder.put(shardResult.indexId, shardResult.shardId, shardResult.newGeneration); - } - return builder.build(); + return shardGenerationsBuilder.build(); } public Iterator getBlobPaths() { - return Iterators.flatMap(shardDeleteResults.iterator(), shardResult -> { - final var shardPath = 
shardPath(shardResult.indexId, shardResult.shardId).buildAsString(); + final StreamInput input; + try { + compressed.close(); + input = new InputStreamStreamInput( + new BufferedInputStream( + new InflaterInputStream(shardDeleteResults.bytes().streamInput()), + DeflateCompressor.BUFFER_SIZE + ) + ); + resources.add(input); + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } + + return Iterators.flatMap(Iterators.forRange(0, resultCount, i -> { + try { + return new ShardSnapshotMetaDeleteResult(input); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }), shardResult -> { + final var shardPath = shardPath(new IndexId("_na_", shardResult.indexId), shardResult.shardId).buildAsString(); return Iterators.map(shardResult.blobsToDelete.iterator(), blob -> shardPath + blob); }); } + + @Override + public void close() { + try { + IOUtils.close(resources); + } catch (IOException e) { + assert false : e; // no IO actually happens here + throw new UncheckedIOException(e); + } + } + + // exposed for tests + int sizeInBytes() { + return shardDeleteResults.size(); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java index d8852155f8d77..92cdf57102f42 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/ReservedClusterStateHandler.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -121,4 +122,9 @@ default void validate(MasterNodeRequest request) { * @throws IOException */ T fromXContent(XContentParser parser) throws 
IOException; + + /** + * Reserved-state handlers create master-node requests but never actually send them to the master node so the timeouts are not relevant. + */ + TimeValue DUMMY_TIMEOUT = TimeValue.THIRTY_SECONDS; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java index d2c6626cb35c1..ac9748251953c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -41,10 +41,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String name = request.param("repository"); - CleanupRepositoryRequest cleanupRepositoryRequest = new CleanupRepositoryRequest(name); - cleanupRepositoryRequest.ackTimeout(getAckTimeout(request)); - cleanupRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var cleanupRepositoryRequest = new CleanupRepositoryRequest( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("repository") + ); return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index b6b63a6774667..713c3243f5ec8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -47,12 +47,12 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC try (var parser = 
request.contentParser()) { final Map source = parser.map(); final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( + getMasterNodeTimeout(request), request.param("repository"), request.param("snapshot"), request.param("target_snapshot"), XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) ); - cloneSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java index 9491ecfcc1115..8f4e42e210be8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCreateSnapshotAction.java @@ -43,9 +43,8 @@ public String getName() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository"); String snapshot = request.param("snapshot"); - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot); + final var createSnapshotRequest = new CreateSnapshotRequest(getMasterNodeTimeout(request), repository, snapshot); request.applyContentParser(p -> createSnapshotRequest.source(p.mapOrdered())); - createSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); createSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); return channel -> client.admin().cluster().createSnapshot(createSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git 
a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java index 067a40e293ff8..26e9cd101a53d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteRepositoryAction.java @@ -45,9 +45,7 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String name = request.param("repository"); - DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest(name); - deleteRepositoryRequest.ackTimeout(getAckTimeout(request)); - deleteRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var deleteRepositoryRequest = new DeleteRepositoryRequest(getMasterNodeTimeout(request), getAckTimeout(request), name); return channel -> client.admin() .cluster() .deleteRepository( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java index 37870c44fe256..74c258fd8b402 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestDeleteSnapshotAction.java @@ -61,8 +61,7 @@ public Set supportedQueryParameters() { public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String repository = request.param("repository"); String[] snapshots = Strings.splitStringByCommaToArray(request.param("snapshot")); - DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repository, snapshots); - deleteSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); 
+ final var deleteSnapshotRequest = new DeleteSnapshotRequest(getMasterNodeTimeout(request), repository, snapshots); deleteSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", deleteSnapshotRequest.waitForCompletion())); return channel -> client.admin().cluster().deleteSnapshot(deleteSnapshotRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java index c2d4484f1e098..4c15b6514c7bd 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetRepositoriesAction.java @@ -51,8 +51,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); - GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(repositories); - getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var getRepositoriesRequest = new GetRepositoriesRequest(getMasterNodeTimeout(request), repositories); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); settingsFilter.addFilterSettingParams(request); return channel -> client.admin().cluster().getRepositories(getRepositoriesRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index 45913b9b3ce2a..2b3ef6581e14f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -58,7 +58,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC String[] repositories = request.paramAsStringArray("repository", Strings.EMPTY_ARRAY); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest(repositories).snapshots(snapshots); + final var getSnapshotsRequest = new GetSnapshotsRequest(getMasterNodeTimeout(request), repositories).snapshots(snapshots); getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose())); final SnapshotSortKey sort = SnapshotSortKey.of(request.param("sort", getSnapshotsRequest.sort().toString())); @@ -81,7 +81,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final SortOrder order = SortOrder.fromString(request.param("order", getSnapshotsRequest.order().toString())); getSnapshotsRequest.order(order); getSnapshotsRequest.includeIndexNames(request.paramAsBoolean(INDEX_NAMES_XCONTENT_PARAM, getSnapshotsRequest.includeIndexNames())); - getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java index b25e394185877..51c4f0a6c9e13 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPutRepositoryAction.java @@ -47,13 +47,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { String name = request.param("repository"); - PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(name); + final var putRepositoryRequest = new PutRepositoryRequest(getMasterNodeTimeout(request), getAckTimeout(request), name); try (XContentParser parser = request.contentParser()) { putRepositoryRequest.source(parser.mapOrdered()); } putRepositoryRequest.verify(request.paramAsBoolean("verify", true)); - putRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - putRepositoryRequest.ackTimeout(getAckTimeout(request)); return channel -> client.admin() .cluster() .putRepository( diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java index dcf6a1d165e7a..21a8349770a45 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestResetFeatureStateAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; @@ -43,8 +44,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); - + final var req = new ResetFeatureStateRequest(RestUtils.getMasterNodeTimeout(request)); 
return restChannel -> client.execute(ResetFeatureStateAction.INSTANCE, req, new RestToXContentListener<>(restChannel, r -> { long failures = r.getFeatureStateResetStatuses() .stream() diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java index 06524a040db36..eeae14d230ca4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRestoreSnapshotAction.java @@ -40,10 +40,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String repository = request.param("repository"); - String snapshot = request.param("snapshot"); - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repository, snapshot); - restoreSnapshotRequest.masterNodeTimeout(getMasterNodeTimeout(request)); + final var restoreSnapshotRequest = new RestoreSnapshotRequest( + getMasterNodeTimeout(request), + request.param("repository"), + request.param("snapshot") + ); restoreSnapshotRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); request.applyContentParser(p -> restoreSnapshotRequest.source(p.mapOrdered())); return channel -> client.admin().cluster().restoreSnapshot(restoreSnapshotRequest, new RestToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index 33b4ba04b826e..f8cb2c6086978 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ 
-46,15 +46,13 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String repository = request.param("repository", "_all"); String[] snapshots = request.paramAsStringArray("snapshot", Strings.EMPTY_ARRAY); if (snapshots.length == 1 && "_all".equalsIgnoreCase(snapshots[0])) { snapshots = Strings.EMPTY_ARRAY; } - SnapshotsStatusRequest snapshotsStatusRequest = new SnapshotsStatusRequest(repository).snapshots(snapshots); + final var snapshotsStatusRequest = new SnapshotsStatusRequest(getMasterNodeTimeout(request), request.param("repository", "_all")); + snapshotsStatusRequest.snapshots(snapshots); snapshotsStatusRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", snapshotsStatusRequest.ignoreUnavailable())); - - snapshotsStatusRequest.masterNodeTimeout(getMasterNodeTimeout(request)); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() .snapshotsStatus(snapshotsStatusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java index b36c4ac56ae71..850975672e25f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshottableFeaturesAction.java @@ -37,9 +37,7 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - final GetSnapshottableFeaturesRequest req = new GetSnapshottableFeaturesRequest(); - req.masterNodeTimeout(getMasterNodeTimeout(request)); - - return restChannel -> { client.execute(SnapshottableFeaturesAction.INSTANCE, req, new 
RestToXContentListener<>(restChannel)); }; + final var req = new GetSnapshottableFeaturesRequest(getMasterNodeTimeout(request)); + return restChannel -> client.execute(SnapshottableFeaturesAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java index 9880268f617db..9477895f0f758 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestVerifyRepositoryAction.java @@ -38,10 +38,11 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - String name = request.param("repository"); - VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(name); - verifyRepositoryRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - verifyRepositoryRequest.ackTimeout(getAckTimeout(request)); + final var verifyRepositoryRequest = new VerifyRepositoryRequest( + getMasterNodeTimeout(request), + getAckTimeout(request), + request.param("repository") + ); return channel -> client.admin().cluster().verifyRepository(verifyRepositoryRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java index 5744923b86d6c..6eacafef2795e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestRepositoriesAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import 
org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -37,10 +38,8 @@ public List routes() { @Override protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { - GetRepositoriesRequest getRepositoriesRequest = new GetRepositoriesRequest(); + final var getRepositoriesRequest = new GetRepositoriesRequest(getMasterNodeTimeout(request), Strings.EMPTY_ARRAY); getRepositoriesRequest.local(request.paramAsBoolean("local", getRepositoriesRequest.local())); - getRepositoriesRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - return channel -> client.admin() .cluster() .getRepositories(getRepositoriesRequest, new RestResponseListener(channel) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index 0ff44e37698d9..d6fc9efc183f6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -49,16 +49,18 @@ public String getName() { return "cat_snapshot_action"; } + private static final String[] MATCH_ALL_PATTERNS = { ResolvedRepositories.ALL_PATTERN }; + @Override protected RestChannelConsumer doCatRequest(final RestRequest request, NodeClient client) { - final String[] matchAll = { ResolvedRepositories.ALL_PATTERN }; - GetSnapshotsRequest getSnapshotsRequest = new GetSnapshotsRequest().repositories(request.paramAsStringArray("repository", matchAll)) - .snapshots(matchAll); + final var getSnapshotsRequest = new GetSnapshotsRequest( + getMasterNodeTimeout(request), + request.paramAsStringArray("repository", MATCH_ALL_PATTERNS), + MATCH_ALL_PATTERNS + ); 
getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable())); - getSnapshotsRequest.masterNodeTimeout(getMasterNodeTimeout(request)); - return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestResponseListener<>(channel) { @Override public RestResponse buildResponse(GetSnapshotsResponse getSnapshotsResponse) throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java index f02d551fd9a44..168eac6d60245 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java @@ -27,7 +27,7 @@ public class PutRepositoryRequestTests extends ESTestCase { @SuppressWarnings("unchecked") public void testCreateRepositoryToXContent() throws IOException { Map mapParams = new HashMap<>(); - PutRepositoryRequest request = new PutRepositoryRequest(); + PutRepositoryRequest request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); String repoName = "test"; request.name(repoName); mapParams.put("name", repoName); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 794be6e463548..2886ca7be4821 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -38,7 +38,7 @@ public void testToXContent() throws IOException { String repo = randomAlphaOfLength(5); String snap 
= randomAlphaOfLength(10); - CreateSnapshotRequest original = new CreateSnapshotRequest(repo, snap); + CreateSnapshotRequest original = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repo, snap); if (randomBoolean()) { List indices = new ArrayList<>(); @@ -106,7 +106,11 @@ public void testToXContent() throws IOException { .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) ) { Map map = parser.mapOrdered(); - CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); + CreateSnapshotRequest processed = new CreateSnapshotRequest( + TEST_REQUEST_TIMEOUT, + (String) map.get("repository"), + (String) map.get("snapshot") + ); processed.waitForCompletion(original.waitForCompletion()); processed.masterNodeTimeout(original.masterNodeTimeout()); processed.source(map); @@ -162,7 +166,8 @@ public void testSizeCheck() { } private CreateSnapshotRequest createSnapshotRequestWithMetadata(Map metadata) { - return new CreateSnapshotRequest(randomAlphaOfLength(5), randomAlphaOfLength(5)).indices(randomAlphaOfLength(5)) - .userMetadata(metadata); + return new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, randomAlphaOfLength(5), randomAlphaOfLength(5)).indices( + randomAlphaOfLength(5) + ).userMetadata(metadata); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java index 810d297602e8a..b8e958169fc97 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestTests.java @@ -18,62 +18,70 @@ public class GetSnapshotsRequestTests extends ESTestCase { public void testValidateParameters() { { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", 
"snapshot"); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot"); assertNull(request.validate()); request.size(0); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("size must be -1 or greater than 0")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").size(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").size( + randomIntBetween(1, 500) + ); assertNull(request.validate()); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).size(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .size(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use size limit with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).offset(randomIntBetween(1, 500)); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use offset with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).sort(SnapshotSortKey.INDICES); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .sort(SnapshotSortKey.INDICES); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", 
"snapshot").verbose(false).order(SortOrder.DESC); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .order(SortOrder.DESC); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use non-default sort order with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false) + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").verbose(false).fromSortValue("bar"); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").verbose(false) + .fromSortValue("bar"); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use from_sort_value with verbose=false")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").after( + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").after( new SnapshotSortKey.After("foo", "repo", "bar") ).offset(randomIntBetween(1, 500)); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use after and offset simultaneously")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").fromSortValue("foo") + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").fromSortValue("foo") .after(new SnapshotSortKey.After("foo", "repo", "bar")); final ActionRequestValidationException e = request.validate(); 
assertThat(e.getMessage(), containsString("can't use after and from_sort_value simultaneously")); } { - final GetSnapshotsRequest request = new GetSnapshotsRequest("repo", "snapshot").policies("some-policy").verbose(false); + final GetSnapshotsRequest request = new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, "repo", "snapshot").policies("some-policy") + .verbose(false); final ActionRequestValidationException e = request.validate(); assertThat(e.getMessage(), containsString("can't use slm policy filter with verbose=false")); } @@ -81,6 +89,7 @@ public void testValidateParameters() { public void testGetDescription() { final GetSnapshotsRequest request = new GetSnapshotsRequest( + TEST_REQUEST_TIMEOUT, new String[] { "repo1", "repo2" }, new String[] { "snapshotA", "snapshotB" } ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java index fc75fb6650b16..1496609f9bc1f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequestSerializationTests.java @@ -30,10 +30,10 @@ protected Writeable.Reader instanceReader() { protected GetShardSnapshotRequest createTestInstance() { ShardId shardId = randomShardId(); if (randomBoolean()) { - return GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + return GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { List repositories = randomList(1, randomIntBetween(1, 100), () -> randomAlphaOfLength(randomIntBetween(1, 100))); - return GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, repositories); + return 
GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, repositories); } } @@ -41,9 +41,9 @@ protected GetShardSnapshotRequest createTestInstance() { protected GetShardSnapshotRequest mutateInstance(GetShardSnapshotRequest instance) { ShardId shardId = randomShardId(); if (instance.getFromAllRepositories()) { - return GetShardSnapshotRequest.latestSnapshotInAllRepositories(shardId); + return GetShardSnapshotRequest.latestSnapshotInAllRepositories(TEST_REQUEST_TIMEOUT, shardId); } else { - return GetShardSnapshotRequest.latestSnapshotInRepositories(shardId, instance.getRepositories()); + return GetShardSnapshotRequest.latestSnapshotInRepositories(TEST_REQUEST_TIMEOUT, shardId, instance.getRepositories()); } } @@ -52,7 +52,11 @@ private ShardId randomShardId() { } public void testGetDescription() { - final GetShardSnapshotRequest request = new GetShardSnapshotRequest(Arrays.asList("repo1", "repo2"), new ShardId("idx", "uuid", 0)); + final GetShardSnapshotRequest request = new GetShardSnapshotRequest( + TEST_REQUEST_TIMEOUT, + Arrays.asList("repo1", "repo2"), + new ShardId("idx", "uuid", 0) + ); assertThat(request.getDescription(), equalTo("shard[idx][0], repositories[repo1,repo2]")); final GetShardSnapshotRequest randomRequest = createTestInstance(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index 45c0a5990f117..53bbfb775f631 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -107,7 +107,7 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) { @Override protected RestoreSnapshotRequest createTestInstance() { - return randomState(new 
RestoreSnapshotRequest(randomAlphaOfLength(5), randomAlphaOfLength(10))); + return randomState(new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, randomAlphaOfLength(5), randomAlphaOfLength(10))); } @Override @@ -139,7 +139,7 @@ public void testSource() throws IOException { // we will only restore properties from the map that are contained in the request body. All other // properties are restored from the original (in the actual REST action this is restored from the // REST path and request parameters). - RestoreSnapshotRequest processed = new RestoreSnapshotRequest(original.repository(), original.snapshot()); + RestoreSnapshotRequest processed = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, original.repository(), original.snapshot()); processed.masterNodeTimeout(original.masterNodeTimeout()); processed.waitForCompletion(original.waitForCompletion()); diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index 652e7f014b8ef..39e424adecfce 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -117,7 +117,7 @@ public void testActions() { .execute(new AssertingActionListener<>(TransportClusterStatsAction.TYPE.name(), client.threadPool())); client.admin() .cluster() - .prepareCreateSnapshot("repo", "bck") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "bck") .execute(new AssertingActionListener<>(TransportCreateSnapshotAction.TYPE.name(), client.threadPool())); client.execute( TransportClusterRerouteAction.TYPE, diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java index a3e11c0645e32..5b50eb63e1489 100644 --- 
a/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java +++ b/server/src/test/java/org/elasticsearch/common/xcontent/support/XContentHelperTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.common.xcontent.support; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; @@ -421,25 +420,4 @@ public void testParseToType() throws IOException { assertThat(names, equalTo(Set.of("a", "c"))); } - - public void testDrainAndClose() throws IOException { - String json = """ - { "a": "b", "c": "d", "e": {"f": "g"}, "h": ["i", "j", {"k": "l"}]}"""; - var parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json); - var content = XContentBuilder.builder(XContentType.JSON.xContent()); - XContentHelper.drainAndClose(parser, content); - - assertEquals(json.replace(" ", ""), Strings.toString(content)); - assertTrue(parser.isClosed()); - } - - public void testDrainAndCloseAlreadyClosed() throws IOException { - var parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, "{}"); - parser.close(); - - assertThrows( - IllegalStateException.class, - () -> XContentHelper.drainAndClose(parser, XContentBuilder.builder(XContentType.JSON.xContent())) - ); - } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java index 886b0aa9e425d..d6b675ed0eb51 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/FieldAliasMapperValidationTests.java @@ -159,7 +159,7 @@ public void testFieldAliasWithDifferentNestedScopes() { private static FieldMapper createFieldMapper(String parent, String name) { return new 
BooleanFieldMapper.Builder(name, ScriptCompiler.NONE, false, IndexVersion.current()).build( - new MapperBuilderContext(parent) + new MapperBuilderContext(parent, false, false, false, ObjectMapper.Defaults.DYNAMIC, MapperService.MergeReason.MAPPING_UPDATE) ); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index e7f8a16c5cc10..fc30b9b6677f1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -11,16 +11,19 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.xcontent.XContentBuilder; +import org.hamcrest.Matchers; import java.io.IOException; import java.math.BigInteger; import java.util.Base64; +import java.util.List; import java.util.Locale; +import java.util.Map; public class IgnoredSourceFieldMapperTests extends MapperServiceTestCase { - private String getSyntheticSourceWithFieldLimit(CheckedConsumer build) throws IOException { - DocumentMapper documentMapper = createMapperService( + private DocumentMapper getDocumentMapperWithFieldLimit() throws IOException { + return createMapperService( Settings.builder() .put("index.mapping.total_fields.limit", 2) .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) @@ -30,6 +33,15 @@ private String getSyntheticSourceWithFieldLimit(CheckedConsumer build) throws IOException { + DocumentMapper mapper = getDocumentMapperWithFieldLimit(); + return mapper.parse(source(build)); + } + + private String getSyntheticSourceWithFieldLimit(CheckedConsumer build) throws IOException { + DocumentMapper documentMapper = getDocumentMapperWithFieldLimit(); return syntheticSource(documentMapper, build); } @@ -78,7 +90,54 @@ public void testIgnoredBytes() throws IOException { 
public void testIgnoredObjectBoolean() throws IOException { boolean value = randomBoolean(); - assertEquals("{\"my_value\":" + value + "}", getSyntheticSourceWithFieldLimit(b -> b.field("my_value", value))); + assertEquals("{\"my_object\":{\"my_value\":" + value + "}}", getSyntheticSourceWithFieldLimit(b -> { + b.startObject("my_object").field("my_value", value).endObject(); + })); + } + + public void testIgnoredArray() throws IOException { + assertEquals("{\"my_array\":[{\"int_value\":10},{\"int_value\":20}]}", getSyntheticSourceWithFieldLimit(b -> { + b.startArray("my_array"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + })); + } + + public void testEncodeFieldToMap() throws IOException { + String value = randomAlphaOfLength(5); + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit(b -> b.field("my_value", value)); + byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_value", mappedNameValue.nameValue().name()); + assertEquals(value, mappedNameValue.map().get("my_value")); + } + + @SuppressWarnings("unchecked") + public void testEncodeObjectToMapAndDecode() throws IOException { + String value = randomAlphaOfLength(5); + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit( + b -> { b.startObject("my_object").field("my_value", value).endObject(); } + ); + byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_object", mappedNameValue.nameValue().name()); + assertEquals(value, ((Map) mappedNameValue.map().get("my_object")).get("my_value")); + assertArrayEquals(bytes, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, 
mappedNameValue.map())); + } + + public void testEncodeArrayToMapAndDecode() throws IOException { + ParsedDocument parsedDocument = getParsedDocumentWithFieldLimit(b -> { + b.startArray("my_array"); + b.startObject().field("int_value", 10).endObject(); + b.startObject().field("int_value", 20).endObject(); + b.endArray(); + }); + byte[] bytes = parsedDocument.rootDoc().getField(IgnoredSourceFieldMapper.NAME).binaryValue().bytes; + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(bytes); + assertEquals("my_array", mappedNameValue.nameValue().name()); + assertThat((List) mappedNameValue.map().get("my_array"), Matchers.contains(Map.of("int_value", 10), Map.of("int_value", 20))); + assertArrayEquals(bytes, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, mappedNameValue.map())); } public void testMultipleIgnoredFieldsRootObject() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java index 77d3259ea1091..3cea8a3403307 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperMergeContextTests.java @@ -10,21 +10,23 @@ import org.elasticsearch.test.ESTestCase; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public class MapperMergeContextTests extends ESTestCase { public void testAddFieldIfPossibleUnderLimit() { - MapperMergeContext context = MapperMergeContext.root(false, false, 1); + MapperMergeContext context = MapperMergeContext.root(false, false, MAPPING_UPDATE, 1); assertTrue(context.decrementFieldBudgetIfPossible(1)); assertFalse(context.decrementFieldBudgetIfPossible(1)); } public void testAddFieldIfPossibleAtLimit() { - MapperMergeContext context = MapperMergeContext.root(false, false, 0); + MapperMergeContext context = 
MapperMergeContext.root(false, false, MAPPING_UPDATE, 0); assertFalse(context.decrementFieldBudgetIfPossible(1)); } public void testAddFieldIfPossibleUnlimited() { - MapperMergeContext context = MapperMergeContext.root(false, false, Long.MAX_VALUE); + MapperMergeContext context = MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE); assertTrue(context.decrementFieldBudgetIfPossible(Integer.MAX_VALUE)); assertTrue(context.decrementFieldBudgetIfPossible(Integer.MAX_VALUE)); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java index 01cbe496e6a3d..1ab1d881d76b3 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MultiFieldsTests.java @@ -16,6 +16,8 @@ import java.util.Map; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public class MultiFieldsTests extends ESTestCase { public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordField() { @@ -45,7 +47,11 @@ public void testMultiFieldsBuilderHasSyntheticSourceCompatibleKeywordFieldDuring keywordFieldMapperBuilder ).build(MapperBuilderContext.root(false, false)); - builder.merge(newField, new FieldMapper.Conflicts("TextFieldMapper"), MapperMergeContext.root(false, false, Long.MAX_VALUE)); + builder.merge( + newField, + new FieldMapper.Conflicts("TextFieldMapper"), + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); var expected = hasNormalizer == false; assertEquals(expected, builder.multiFieldsBuilder.hasSyntheticSourceCompatibleKeywordField()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java index c767429d4c0fb..289f12d1508f9 100644 --- 
a/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NestedObjectMapperTests.java @@ -1511,7 +1511,7 @@ public void testMergeNested() { MapperException e = expectThrows( MapperException.class, - () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, MergeReason.MAPPING_UPDATE, Long.MAX_VALUE)) ); assertThat(e.getMessage(), containsString("[include_in_parent] parameter can't be updated on a nested object mapping")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java index 94a4c2ea92fbb..25ef3c8550ec0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ObjectMapperMergeTests.java @@ -13,6 +13,9 @@ import java.util.Collections; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.INDEX_TEMPLATE; +import static org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; + public final class ObjectMapperMergeTests extends ESTestCase { private final RootObjectMapper rootObjectMapper = createMapping(false, true, true, false); @@ -41,7 +44,10 @@ public void testMerge() { ObjectMapper mergeWith = createMapping(false, true, true, true); // WHEN merging mappings - final ObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = rootObjectMapper.merge( + mergeWith, + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); // THEN "baz" new field is added to merged mapping final ObjectMapper mergedFoo = (ObjectMapper) merged.getMapper("foo"); @@ -63,7 +69,7 @@ public void testMergeWhenDisablingField() { // THEN a 
MapperException is thrown with an excepted message MapperException e = expectThrows( MapperException.class, - () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [foo]", e.getMessage()); } @@ -75,7 +81,7 @@ public void testMergeDisabledField() { new ObjectMapper.Builder("disabled", Explicit.IMPLICIT_TRUE) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertFalse(((ObjectMapper) merged.getMapper("disabled")).isEnabled()); } @@ -84,14 +90,11 @@ public void testMergeEnabled() { MapperException e = expectThrows( MapperException.class, - () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [disabled]", e.getMessage()); - ObjectMapper result = rootObjectMapper.merge( - mergeWith, - MapperMergeContext.root(false, false, MapperService.MergeReason.INDEX_TEMPLATE, Long.MAX_VALUE) - ); + ObjectMapper result = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, INDEX_TEMPLATE, Long.MAX_VALUE)); assertTrue(result.isEnabled()); } @@ -105,14 +108,11 @@ public void testMergeEnabledForRootMapper() { MapperException e = expectThrows( MapperException.class, - () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, MAPPING_UPDATE, 
Long.MAX_VALUE)) ); assertEquals("the [enabled] parameter can't be updated for the object mapping [" + type + "]", e.getMessage()); - ObjectMapper result = firstMapper.merge( - secondMapper, - MapperMergeContext.root(false, false, MapperService.MergeReason.INDEX_TEMPLATE, Long.MAX_VALUE) - ); + ObjectMapper result = firstMapper.merge(secondMapper, MapperMergeContext.root(false, false, INDEX_TEMPLATE, Long.MAX_VALUE)); assertFalse(result.isEnabled()); } @@ -126,7 +126,7 @@ public void testMergeDisabledRootMapper() { Collections.singletonMap("test", new TestRuntimeField("test", "long")) ).build(MapperBuilderContext.root(false, false)); - RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + RootObjectMapper merged = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertFalse(merged.isEnabled()); assertEquals(1, merged.runtimeFields().size()); assertEquals("test", merged.runtimeFields().iterator().next().name()); @@ -136,7 +136,7 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalseAtRoot() { RootObjectMapper mergeInto = createRootSubobjectFalseLeafWithDots(); RootObjectMapper mergeWith = createRootSubobjectFalseLeafWithDots(); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); final KeywordFieldMapper keywordFieldMapper = (KeywordFieldMapper) merged.getMapper("host.name"); assertEquals("host.name", keywordFieldMapper.name()); @@ -151,7 +151,7 @@ public void testMergedFieldNamesFieldWithDotsSubobjectsFalse() { createObjectSubobjectsFalseLeafWithDots() ).build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = 
mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); @@ -166,7 +166,7 @@ public void testMergedFieldNamesMultiFields() { RootObjectMapper mergeWith = new RootObjectMapper.Builder("_doc", Explicit.IMPLICIT_TRUE).add(createTextKeywordMultiField("text")) .build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); TextFieldMapper text = (TextFieldMapper) merged.getMapper("text"); assertEquals("text", text.name()); @@ -184,7 +184,7 @@ public void testMergedFieldNamesMultiFieldsWithinSubobjectsFalse() { createObjectSubobjectsFalseLeafWithMultiField() ).build(MapperBuilderContext.root(false, false)); - final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + final ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper foo = (ObjectMapper) merged.getMapper("foo"); ObjectMapper metrics = (ObjectMapper) foo.getMapper("metrics"); @@ -201,8 +201,8 @@ public void testMergeWithLimit() { ObjectMapper mergeWith = createMapping(false, true, true, true); // WHEN merging mappings - final ObjectMapper mergedAdd0 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - final ObjectMapper mergedAdd1 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + final ObjectMapper mergedAdd0 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + final ObjectMapper mergedAdd1 = rootObjectMapper.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); // THEN 
"baz" new field is added to merged mapping assertEquals(3, rootObjectMapper.getTotalFieldsCount()); @@ -219,10 +219,10 @@ public void testMergeWithLimitTruncatedObjectField() { ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, 1)); - ObjectMapper mergedAdd2 = root.merge(mergeWith, MapperMergeContext.root(false, false, 2)); - ObjectMapper mergedAdd3 = root.merge(mergeWith, MapperMergeContext.root(false, false, 3)); + ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); + ObjectMapper mergedAdd2 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 2)); + ObjectMapper mergedAdd3 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 3)); assertEquals(0, root.getTotalFieldsCount()); assertEquals(0, mergedAdd0.getTotalFieldsCount()); assertEquals(1, mergedAdd1.getTotalFieldsCount()); @@ -252,8 +252,8 @@ public void testMergeSameObjectDifferentFields() { ).add(new KeywordFieldMapper.Builder("child2", IndexVersion.current())) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = root.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(2, root.getTotalFieldsCount()); assertEquals(2, mergedAdd0.getTotalFieldsCount()); assertEquals(3, mergedAdd1.getTotalFieldsCount()); 
@@ -280,8 +280,8 @@ public void testMergeWithLimitMultiField() { assertEquals(2, mergeInto.getTotalFieldsCount()); assertEquals(2, mergeWith.getTotalFieldsCount()); - ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(2, mergedAdd0.getTotalFieldsCount()); assertEquals(3, mergedAdd1.getTotalFieldsCount()); } @@ -297,8 +297,8 @@ public void testMergeWithLimitRuntimeField() { assertEquals(3, mergeInto.getTotalFieldsCount()); assertEquals(2, mergeWith.getTotalFieldsCount()); - ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 0)); - ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, 1)); + ObjectMapper mergedAdd0 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 0)); + ObjectMapper mergedAdd1 = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, 1)); assertEquals(3, mergedAdd0.getTotalFieldsCount()); assertEquals(4, mergedAdd1.getTotalFieldsCount()); } @@ -315,7 +315,7 @@ public void testMergeSubobjectsFalseWithObject() { ) ).build(MapperBuilderContext.root(false, false)); - ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + ObjectMapper merged = mergeInto.merge(mergeWith, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); ObjectMapper parentMapper = (ObjectMapper) merged.getMapper("parent"); assertNotNull(parentMapper); assertNotNull(parentMapper.getMapper("child.grandchild")); @@ -332,7 +332,16 @@ private static RootObjectMapper createRootSubobjectFalseLeafWithDots() { 
private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { KeywordFieldMapper.Builder fieldBuilder = new KeywordFieldMapper.Builder("host.name", IndexVersion.current()); - KeywordFieldMapper fieldMapper = fieldBuilder.build(new MapperBuilderContext("foo.metrics")); + KeywordFieldMapper fieldMapper = fieldBuilder.build( + new MapperBuilderContext( + "foo.metrics", + false, + false, + false, + ObjectMapper.Defaults.DYNAMIC, + MapperService.MergeReason.MAPPING_UPDATE + ) + ); assertEquals("host.name", fieldMapper.simpleName()); assertEquals("foo.metrics.host.name", fieldMapper.name()); return new ObjectMapper.Builder("foo", ObjectMapper.Defaults.SUBOBJECTS).add( @@ -342,7 +351,16 @@ private static ObjectMapper.Builder createObjectSubobjectsFalseLeafWithDots() { private ObjectMapper.Builder createObjectSubobjectsFalseLeafWithMultiField() { TextFieldMapper.Builder fieldBuilder = createTextKeywordMultiField("host.name"); - TextFieldMapper textKeywordMultiField = fieldBuilder.build(new MapperBuilderContext("foo.metrics")); + TextFieldMapper textKeywordMultiField = fieldBuilder.build( + new MapperBuilderContext( + "foo.metrics", + false, + false, + false, + ObjectMapper.Defaults.DYNAMIC, + MapperService.MergeReason.MAPPING_UPDATE + ) + ); assertEquals("host.name", textKeywordMultiField.simpleName()); assertEquals("foo.metrics.host.name", textKeywordMultiField.name()); FieldMapper fieldMapper = textKeywordMultiField.multiFields.iterator().next(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 0ec1997ae652e..308f775ec7b28 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -40,6 +40,7 @@ import java.util.Map; import java.util.Objects; +import static 
org.elasticsearch.index.mapper.MapperService.MergeReason.MAPPING_UPDATE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.mock; @@ -348,7 +349,7 @@ public void testMerging() { {"type":"test_mapper","fixed":true,"fixed2":true,"required":"value"}"""); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> mapper.merge(badMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> mapper.merge(badMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); String expectedError = """ Mapper for [field] conflicts with existing mapper: @@ -361,7 +362,7 @@ public void testMerging() { // TODO: should we have to include 'fixed' here? Or should updates take as 'defaults' the existing values? TestMapper goodMerge = fromMapping(""" {"type":"test_mapper","fixed":false,"variable":"updated","required":"value"}"""); - TestMapper merged = (TestMapper) mapper.merge(goodMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(goodMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals("{\"field\":" + mapping + "}", Strings.toString(mapper)); // original mapping is unaffected assertEquals(""" @@ -379,7 +380,7 @@ public void testMultifields() throws IOException { String addSubField = """ {"type":"test_mapper","variable":"foo","required":"value","fields":{"sub2":{"type":"keyword"}}}"""; TestMapper toMerge = fromMapping(addSubField); - TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals(XContentHelper.stripWhitespace(""" { "field": { @@ -402,7 +403,7 @@ public void testMultifields() throws IOException { TestMapper badToMerge = fromMapping(badSubField); 
IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> merged.merge(badToMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> merged.merge(badToMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals("mapper [field.sub2] cannot be changed from type [keyword] to [binary]", e.getMessage()); } @@ -418,13 +419,16 @@ public void testCopyTo() { TestMapper toMerge = fromMapping(""" {"type":"test_mapper","variable":"updated","required":"value","copy_to":["foo","bar"]}"""); - TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper merged = (TestMapper) mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)); assertEquals(""" {"field":{"type":"test_mapper","variable":"updated","required":"value","copy_to":["foo","bar"]}}""", Strings.toString(merged)); TestMapper removeCopyTo = fromMapping(""" {"type":"test_mapper","variable":"updated","required":"value"}"""); - TestMapper noCopyTo = (TestMapper) merged.merge(removeCopyTo, MapperMergeContext.root(false, false, Long.MAX_VALUE)); + TestMapper noCopyTo = (TestMapper) merged.merge( + removeCopyTo, + MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE) + ); assertEquals(""" {"field":{"type":"test_mapper","variable":"updated","required":"value"}}""", Strings.toString(noCopyTo)); } @@ -473,7 +477,7 @@ public void testObjectSerialization() throws IOException { MapperService mapperService = createMapperService(mapping); assertEquals(mapping, Strings.toString(mapperService.documentMapper().mapping())); - mapperService.merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("_doc", new CompressedXContent(mapping), MAPPING_UPDATE); assertEquals(mapping, Strings.toString(mapperService.documentMapper().mapping())); } @@ -490,7 +494,7 @@ public void testCustomSerialization() { 
TestMapper toMerge = fromMapping(conflict); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> mapper.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> mapper.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals( "Mapper for [field] conflicts with existing mapper:\n" @@ -581,7 +585,7 @@ public void testAnalyzers() { TestMapper toMerge = fromMapping(mapping); e = expectThrows( IllegalArgumentException.class, - () -> original.merge(toMerge, MapperMergeContext.root(false, false, Long.MAX_VALUE)) + () -> original.merge(toMerge, MapperMergeContext.root(false, false, MAPPING_UPDATE, Long.MAX_VALUE)) ); assertEquals( "Mapper for [field] conflicts with existing mapper:\n" + "\tCannot update parameter [analyzer] from [default] to [_standard]", diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index a7cc74582afdc..33a593f5aa125 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -227,13 +227,9 @@ public void testJarHellDuplicateCodebaseAcrossDeps() throws Exception { transitiveDeps.put("dep2", Collections.singleton(dupJar.toUri().toURL())); PluginDescriptor info1 = newTestDescriptor("myplugin", List.of("dep1", "dep2")); PluginBundle bundle = new PluginBundle(info1, pluginDir); - IllegalStateException e = expectThrows( - IllegalStateException.class, - () -> PluginsUtils.checkBundleJarHell(JarHell.parseModulesAndClassPath(), bundle, transitiveDeps) - ); - assertEquals("failed to load plugin myplugin due to jar hell", e.getMessage()); - assertThat(e.getCause().getMessage(), containsString("jar hell!")); - assertThat(e.getCause().getMessage(), containsString("duplicate codebases")); + PluginsUtils.checkBundleJarHell(JarHell.parseModulesAndClassPath(), bundle, 
transitiveDeps); + Set transitive = transitiveDeps.get("myplugin"); + assertThat(transitive, containsInAnyOrder(pluginJar.toUri().toURL(), dupJar.toUri().toURL())); } // Note: testing dup codebase with core is difficult because it requires a symlink, but we have mock filesystems and security manager diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 7be1dcdcf7b77..83cb189415f7e 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -192,7 +192,9 @@ public void testRegisterRejectsInvalidRepositoryNames() { public void testPutRepositoryVerificationFails() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(true); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(true); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var failure = safeAwaitFailure(resultListener); @@ -203,14 +205,18 @@ public void testPutRepositoryVerificationFails() { public void testPutRepositoryVerificationFailsOnExisting() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(TestRepository.TYPE).verify(true); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(TestRepository.TYPE) + .verify(true); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var ackResponse = safeAwait(resultListener); assertTrue(ackResponse.isAcknowledged()); // try to update existing repository with faulty repo and 
make sure it is not applied - request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(true); + request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(true); resultListener = new SubscribableListener<>(); repositoriesService.registerRepository(request, resultListener); var failure = safeAwaitFailure(resultListener); @@ -221,7 +227,9 @@ public void testPutRepositoryVerificationFailsOnExisting() { public void testPutRepositorySkipVerification() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type(VerificationFailRepository.TYPE).verify(false); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName) + .type(VerificationFailRepository.TYPE) + .verify(false); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); var ackResponse = safeAwait(resultListener); @@ -280,7 +288,7 @@ public void testRemoveUnknownRepositoryTypeWhenApplyingClusterState() { public void testRegisterRepositoryFailsForUnknownType() { var repoName = randomAlphaOfLengthBetween(10, 25); - var request = new PutRepositoryRequest().name(repoName).type("unknown"); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName).type("unknown"); repositoriesService.registerRepository(request, new ActionListener<>() { @Override @@ -359,7 +367,7 @@ public void testRegisterRepositorySuccessAfterCreationFailed() { assertThat(repo, isA(InvalidRepository.class)); // 2. 
repository creation successfully when current node become master node and repository is put again - var request = new PutRepositoryRequest().name(repoName).type(TestRepository.TYPE); + var request = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name(repoName).type(TestRepository.TYPE); var resultListener = new SubscribableListener(); repositoriesService.registerRepository(request, resultListener); @@ -385,7 +393,13 @@ private ClusterState emptyState() { } private void assertThrowsOnRegister(String repoName) { - expectThrows(RepositoryException.class, () -> repositoriesService.registerRepository(new PutRepositoryRequest(repoName), null)); + expectThrows( + RepositoryException.class, + () -> repositoriesService.registerRepository( + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName), + null + ) + ); } private static class TestRepository implements Repository { diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java index 0d94c027f8c46..1361385521378 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java @@ -172,28 +172,36 @@ public void testDeleteThrottling() { assertAcked( client().admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", repoPath)) ); - client().admin().cluster().prepareCreateSnapshot(TEST_REPO_NAME, "snapshot-1").setWaitForCompletion(true).get(); - client().admin().cluster().prepareCreateSnapshot(TEST_REPO_NAME, "snapshot-2").setWaitForCompletion(true).get(); + client().admin() + .cluster() 
+ .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-1") + .setWaitForCompletion(true) + .get(); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-2") + .setWaitForCompletion(true) + .get(); - assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME)); + assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME)); // Now delete one of the snapshots using the test repo implementation which verifies the throttling behaviour assertAcked( client().admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(TEST_REPO_TYPE) .setSettings(Settings.builder().put("location", repoPath)) ); - assertAcked(client().admin().cluster().prepareDeleteSnapshot(TEST_REPO_NAME, "snapshot-1").get()); + assertAcked(client().admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "snapshot-1").get()); - assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME)); + assertAcked(client().admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME)); } } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 486390f27391c..ac23f646e5c52 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -58,7 +58,6 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLog; import 
org.elasticsearch.threadpool.ThreadPool; import org.junit.After; @@ -106,7 +105,7 @@ public void testRetrieveSnapshots() { logger.info("--> creating repository"); AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); @@ -126,7 +125,7 @@ public void testRetrieveSnapshots() { logger.info("--> create first snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "test-snap-1") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -135,7 +134,7 @@ public void testRetrieveSnapshots() { logger.info("--> create second snapshot"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, "test-snap-2") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -256,7 +255,7 @@ public void testBadChunksize() { RepositoryException.class, () -> client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings( Settings.builder() @@ -286,10 +285,11 @@ public void testRepositoryDataDetails() throws Exception { ); final long beforeStartTime = getInstanceFromNode(ThreadPool.class).absoluteTimeInMillis(); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setPartial(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repositoryName, + "test-snap-1" + 
).setWaitForCompletion(true).setPartial(true).get(); final long afterEndTime = System.currentTimeMillis(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); @@ -347,7 +347,7 @@ private BlobStoreRepository setupRepo() { } AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(TEST_REPO_NAME) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(repoSettings) .setVerify(false) // prevent eager reading of repo data @@ -364,7 +364,10 @@ private BlobStoreRepository setupRepo() { @After public void removeRepo() { try { - client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME).get(TimeValue.timeValueSeconds(10)); + client().admin() + .cluster() + .prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .get(TimeValue.timeValueSeconds(10)); } catch (RepositoryMissingException e) { // ok, not all tests create the test repo } @@ -512,39 +515,50 @@ private Environment createEnvironment() { public void testShardBlobsToDelete() { final var repo = setupRepo(); - final var shardBlobsToDelete = repo.new ShardBlobsToDelete(); - final var expectedShardGenerations = ShardGenerations.builder(); - final var expectedBlobsToDelete = new HashSet(); - - final var countDownLatch = new CountDownLatch(1); - try (var refs = new RefCountingRunnable(countDownLatch::countDown)) { - for (int index = between(0, 10); index > 0; index--) { - final var indexId = new IndexId(randomIdentifier(), randomUUID()); - for (int shard = between(1, 3); shard > 0; shard--) { - final var shardId = shard; - final var shardGeneration = new ShardGeneration(randomUUID()); - expectedShardGenerations.put(indexId, shard, shardGeneration); - final var blobsToDelete = randomList(10, ESTestCase::randomIdentifier); - final var indexPath = repo.basePath().add("indices").add(indexId.getId()).add(Integer.toString(shard)).buildAsString(); - for 
(final var blobToDelete : blobsToDelete) { - expectedBlobsToDelete.add(indexPath + blobToDelete); - } - - repo.threadPool() - .generic() - .execute( - ActionRunnable.run( - refs.acquireListener(), - () -> shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete) - ) + try (var shardBlobsToDelete = repo.new ShardBlobsToDelete()) { + final var expectedShardGenerations = ShardGenerations.builder(); + final var expectedBlobsToDelete = new HashSet(); + + final var countDownLatch = new CountDownLatch(1); + int blobCount = 0; + try (var refs = new RefCountingRunnable(countDownLatch::countDown)) { + for (int index = between(0, 1000); index > 0; index--) { + final var indexId = new IndexId(randomIdentifier(), randomUUID()); + for (int shard = between(1, 30); shard > 0; shard--) { + final var shardId = shard; + final var shardGeneration = new ShardGeneration(randomUUID()); + expectedShardGenerations.put(indexId, shard, shardGeneration); + final var blobsToDelete = randomList( + 100, + () -> randomFrom("meta-", "index-", "snap-") + randomUUID() + randomFrom("", ".dat") ); + blobCount += blobsToDelete.size(); + final var indexPath = repo.basePath() + .add("indices") + .add(indexId.getId()) + .add(Integer.toString(shard)) + .buildAsString(); + for (final var blobToDelete : blobsToDelete) { + expectedBlobsToDelete.add(indexPath + blobToDelete); + } + + repo.threadPool() + .generic() + .execute( + ActionRunnable.run( + refs.acquireListener(), + () -> shardBlobsToDelete.addShardDeleteResult(indexId, shardId, shardGeneration, blobsToDelete) + ) + ); + } } } + safeAwait(countDownLatch); + assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations()); + shardBlobsToDelete.getBlobPaths().forEachRemaining(s -> assertTrue(expectedBlobsToDelete.remove(s))); + assertThat(expectedBlobsToDelete, empty()); + assertThat(shardBlobsToDelete.sizeInBytes(), lessThanOrEqualTo(Math.max(ByteSizeUnit.KB.toIntBytes(1), 20 * 
blobCount))); } - safeAwait(countDownLatch); - assertEquals(expectedShardGenerations.build(), shardBlobsToDelete.getUpdatedShardGenerations()); - shardBlobsToDelete.getBlobPaths().forEachRemaining(s -> assertTrue(expectedBlobsToDelete.remove(s))); - assertThat(expectedBlobsToDelete, empty()); } public void testUuidCreationLogging() { @@ -555,7 +569,10 @@ public void testUuidCreationLogging() { MockLog.assertThatLogger( () -> safeGet( - client().execute(TransportCreateSnapshotAction.TYPE, new CreateSnapshotRequest(repoName, snapshot).waitForCompletion(true)) + client().execute( + TransportCreateSnapshotAction.TYPE, + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshot).waitForCompletion(true) + ) ), BlobStoreRepository.class, new MockLog.SeenEventExpectation( @@ -569,14 +586,21 @@ public void testUuidCreationLogging() { MockLog.assertThatLogger( // no more "Generated" messages ... () -> { - safeGet(client().execute(TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(repoName))); + safeGet( + client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + ) + ); // we get a "Registering" message when re-registering the repository with ?verify=true (the default) MockLog.assertThatLogger( () -> safeGet( client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest(repoName).type("fs").verify(true).settings(repoMetadata.settings()) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).type("fs") + .verify(true) + .settings(repoMetadata.settings()) ) ), RepositoriesService.class, @@ -591,23 +615,31 @@ public void testUuidCreationLogging() { safeGet( client().execute( TransportCreateSnapshotAction.TYPE, - new CreateSnapshotRequest(repoName, randomIdentifier()).waitForCompletion(true) + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()).waitForCompletion(true) ) ); assertTrue( - 
safeGet(client().execute(TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(repoName))).getSnapshots() + safeGet(client().execute(TransportGetSnapshotsAction.TYPE, new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, repoName))) + .getSnapshots() .stream() .anyMatch(snapshotInfo -> snapshotInfo.snapshotId().getName().equals(snapshot)) ); - safeGet(client().execute(TransportDeleteRepositoryAction.TYPE, new DeleteRepositoryRequest(repoName))); + safeGet( + client().execute( + TransportDeleteRepositoryAction.TYPE, + new DeleteRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + ) + ); // No "Registering" message with ?verify=false because we don't read the repo data yet MockLog.assertThatLogger( () -> safeGet( client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest(repoName).type("fs").verify(false).settings(repoMetadata.settings()) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName).type("fs") + .verify(false) + .settings(repoMetadata.settings()) ) ), RepositoriesService.class, @@ -624,7 +656,7 @@ public void testUuidCreationLogging() { () -> safeGet( client().execute( TransportCreateSnapshotAction.TYPE, - new CreateSnapshotRequest(repoName, randomIdentifier()).waitForCompletion(true) + new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()).waitForCompletion(true) ) ), RepositoriesService.class, diff --git a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java index 2d84dfd0cc907..0d0293b962609 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RestoreServiceTests.java @@ -68,7 +68,7 @@ public void testUpdateDataStream() { Index updatedFailureIndex = new Index(failureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(updatedFailureIndex); - RestoreSnapshotRequest 
request = new RestoreSnapshotRequest(); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT); DataStream updateDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -103,7 +103,8 @@ public void testUpdateDataStreamRename() { Index renamedFailureIndex = new Index(renamedFailureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(renamedFailureIndex); - RestoreSnapshotRequest request = new RestoreSnapshotRequest().renamePattern("data-stream-1").renameReplacement("data-stream-2"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("data-stream-1") + .renameReplacement("data-stream-2"); DataStream renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -138,7 +139,7 @@ public void testPrefixNotChanged() { Index renamedFailureIndex = new Index(renamedFailureIndexName, randomUUID()); when(failureIndexMetadata.getIndex()).thenReturn(renamedFailureIndex); - RestoreSnapshotRequest request = new RestoreSnapshotRequest().renamePattern("ds-").renameReplacement("ds2-"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("ds-").renameReplacement("ds2-"); DataStream renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -146,7 +147,7 @@ public void testPrefixNotChanged() { assertEquals(List.of(renamedIndex), renamedDataStream.getIndices()); assertEquals(List.of(renamedFailureIndex), renamedDataStream.getFailureIndices().getIndices()); - request = new RestoreSnapshotRequest().renamePattern("ds-000001").renameReplacement("ds2-000001"); + request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).renamePattern("ds-000001").renameReplacement("ds2-000001"); renamedDataStream = RestoreService.updateDataStream(dataStream, metadata, request); @@ -216,7 +217,7 @@ public void testRefreshRepositoryUuidsRefreshesAsNeeded() { public void 
testNotAllowToRestoreGlobalStateFromSnapshotWithoutOne() { - var request = new RestoreSnapshotRequest().includeGlobalState(true); + var request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).includeGlobalState(true); var repository = new RepositoryMetadata("name", "type", Settings.EMPTY); var snapshot = new Snapshot("repository", new SnapshotId("name", "uuid")); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java index 11b6c98d0f40c..23b1cb63b289b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotRequestsTests.java @@ -22,7 +22,7 @@ public class SnapshotRequestsTests extends ESTestCase { public void testRestoreSnapshotRequestParsing() throws IOException { - RestoreSnapshotRequest request = new RestoreSnapshotRequest("test-repo", "test-snap"); + RestoreSnapshotRequest request = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap"); XContentBuilder builder = jsonBuilder().startObject(); @@ -87,7 +87,7 @@ public void testRestoreSnapshotRequestParsing() throws IOException { } public void testCreateSnapshotRequestParsing() throws IOException { - CreateSnapshotRequest request = new CreateSnapshotRequest("test-repo", "test-snap"); + CreateSnapshotRequest request = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap"); XContentBuilder builder = jsonBuilder().startObject(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index f4aa44f143c40..b40e33c4baba8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -268,7 +268,7 @@ public void verifyReposThenStopServices() { // 
failures seen during the previous test. client().admin() .cluster() - .prepareCreateSnapshot("repo", "last-snapshot") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "last-snapshot") .setWaitForCompletion(true) .setPartial(true) .execute(createSnapshotResponse); @@ -278,7 +278,9 @@ public void verifyReposThenStopServices() { assertThat(snapshotInfo.state(), either(is(SnapshotState.SUCCESS)).or(is(SnapshotState.PARTIAL))); assertThat(snapshotInfo.shardFailures(), iterableWithSize(snapshotInfo.failedShards())); assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards() - snapshotInfo.failedShards())); - client().admin().cluster().cleanupRepository(new CleanupRepositoryRequest("repo"), cleanupResponse); + client().admin() + .cluster() + .cleanupRepository(new CleanupRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo"), cleanupResponse); }); final AtomicBoolean cleanedUp = new AtomicBoolean(false); continueOrDie(cleanupResponse, r -> cleanedUp.set(true)); @@ -317,7 +319,7 @@ public void testSuccessfulSnapshotAndRestore() { continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> { final Runnable afterIndexing = () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseListener); if (documents == 0) { @@ -350,7 +352,7 @@ public void testSuccessfulSnapshotAndRestore() { ignored -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, snapshotName).waitForCompletion(true), + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).waitForCompletion(true), restoreSnapshotResponseListener ) ); @@ -415,7 +417,7 @@ public void testSnapshotWithNodeDisconnects() { } testClusterNodes.randomMasterNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setPartial(partial) .execute(createSnapshotResponseStepListener); }); @@ -492,7 +494,7 @@ public void testSnapshotDeleteWithMasterFailover() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(waitForSnapshot) .execute(createSnapshotResponseStepListener) ); @@ -502,7 +504,7 @@ public void testSnapshotDeleteWithMasterFailover() { scheduleNow(this::disconnectOrRestartMasterNode); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.running(() -> snapshotDeleteResponded.set(true))); }); @@ -544,7 +546,7 @@ public void testConcurrentSnapshotCreateAndDelete() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(createSnapshotResponseStepListener) ); @@ -554,7 +556,10 @@ public void testConcurrentSnapshotCreateAndDelete() { @Override public void clusterChanged(ClusterChangedEvent event) { if (SnapshotsInProgress.get(event.state()).isEmpty() == false) { - client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener); + client().admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .execute(deleteSnapshotStepListener); masterNode.clusterService.removeListener(this); } } @@ -566,7 +571,7 @@ public void clusterChanged(ClusterChangedEvent event) { deleteSnapshotStepListener, acknowledgedResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + 
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createAnotherSnapshotResponseStepListener) ); @@ -609,7 +614,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -620,7 +625,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .execute(createOtherSnapshotResponseStepListener) ); @@ -630,7 +635,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { createOtherSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(deleteSnapshotStepListener) ); @@ -639,7 +644,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { continueOrDie(deleteSnapshotStepListener, deleted -> { client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createAnotherSnapshotResponseStepListener); continueOrDie( @@ -683,7 +688,7 @@ public void testBulkSnapshotDeleteWithAbort() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -697,7 +702,10 @@ public void 
testBulkSnapshotDeleteWithAbort() { continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { for (int i = 0; i < inProgressSnapshots; i++) { - client().admin().cluster().prepareCreateSnapshot(repoName, "other-" + i).execute(createSnapshotListener); + client().admin() + .cluster() + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "other-" + i) + .execute(createSnapshotListener); } }); @@ -707,7 +715,7 @@ public void testBulkSnapshotDeleteWithAbort() { createOtherSnapshotResponseStepListener, createSnapshotResponse -> client().admin() .cluster() - .deleteSnapshot(new DeleteSnapshotRequest(repoName, "*"), deleteSnapshotStepListener) + .deleteSnapshot(new DeleteSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, "*"), deleteSnapshotStepListener) ); deterministicTaskQueue.runAllRunnableTasks(); @@ -742,7 +750,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { index, () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ) @@ -760,7 +768,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { index, () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, secondSnapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, secondSnapshotName) .setWaitForCompletion(true) .execute(createOtherSnapshotResponseStepListener) ) @@ -770,12 +778,17 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { final SubscribableListener restoreSnapshotResponseListener = new SubscribableListener<>(); continueOrDie(createOtherSnapshotResponseStepListener, createSnapshotResponse -> { - scheduleNow(() -> client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener)); + scheduleNow( + () -> client().admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + 
.execute(deleteSnapshotStepListener) + ); scheduleNow( () -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, secondSnapshotName).waitForCompletion(true) + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, secondSnapshotName).waitForCompletion(true) .renamePattern("(.+)") .renameReplacement("restored_$1"), restoreSnapshotResponseListener @@ -871,7 +884,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { createIndicesListener, createIndexResponses -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(false) .setPartial(partialSnapshot) .setIncludeGlobalState(randomBoolean()) @@ -946,7 +959,7 @@ public void testConcurrentDeletes() { createRepoAndIndex(repoName, index, shards), createIndexResponse -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener) ); @@ -962,7 +975,7 @@ public void testConcurrentDeletes() { for (SubscribableListener deleteListener : deleteSnapshotStepListeners) { client().admin() .cluster() - .prepareDeleteSnapshot(repoName, snapshotName) + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.wrap(resp -> deleteListener.onResponse(true), e -> { final Throwable unwrapped = ExceptionsHelper.unwrap( e, @@ -1044,12 +1057,15 @@ public void run() { } testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .execute(ActionListener.running(() -> { createdSnapshot.set(true); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() - .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), ActionListener.noop()); + 
.deleteSnapshot( + new DeleteSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName), + ActionListener.noop() + ); })); scheduleNow( () -> testClusterNodes.randomMasterNodeSafe().client.execute( @@ -1116,7 +1132,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { if (initiatedSnapshot.compareAndSet(false, true)) { client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(createSnapshotResponseStepListener); } @@ -1134,7 +1150,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { createSnapshotResponse -> client().admin() .cluster() .restoreSnapshot( - new RestoreSnapshotRequest(repoName, snapshotName).renamePattern(index) + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).renamePattern(index) .renameReplacement(restoredIndex) .waitForCompletion(true), restoreSnapshotResponseStepListener @@ -1210,7 +1226,7 @@ public void testRunConcurrentSnapshots() { scheduleNow( () -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(snapshotListener) ); @@ -1305,7 +1321,7 @@ public TransportRequestHandler interceptHandler( try (var listeners = new RefCountingListener(stepListener)) { client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(listeners.acquire(createRepoResponse -> {})); @@ -1325,7 +1341,7 @@ public TransportRequestHandler interceptHandler( .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, originalSnapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, originalSnapshotName) 
.setWaitForCompletion(true) .execute(l.map(v -> null)) ); @@ -1341,7 +1357,7 @@ public TransportRequestHandler interceptHandler( stepListener, l -> client.admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIndices(randomNonEmptySubsetOf(indices).toArray(String[]::new)) .setPartial(true) .execute(l.map(v1 -> null)) @@ -1366,7 +1382,7 @@ public TransportRequestHandler interceptHandler( ).addListener(l); client.admin() .cluster() - .prepareCloneSnapshot(repoName, originalSnapshotName, cloneName) + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, originalSnapshotName, cloneName) .setIndices(randomNonEmptySubsetOf(indices).toArray(String[]::new)) .execute(ActionTestUtils.assertNoFailureListener(r -> {})); }))); @@ -1402,7 +1418,7 @@ public void testFullSnapshotUnassignedShards() { try (var listeners = new RefCountingListener(stepListener)) { client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(listeners.acquire(createRepoResponse -> {})); @@ -1429,7 +1445,7 @@ public void testFullSnapshotUnassignedShards() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @Override @@ -1480,7 +1496,7 @@ public void testSnapshotNameAlreadyInUseExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(true) .execute(l) ) @@ -1488,7 +1504,7 @@ public void testSnapshotNameAlreadyInUseExceptionLogging() { .andThen( (l, ignored) -> 
client().admin() .cluster() - .prepareCreateSnapshot(repoName, snapshotName) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @Override @@ -1507,7 +1523,7 @@ public void onFailure(Exception e) { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCloneSnapshot(repoName, snapshotName, snapshotName) + .prepareCloneSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName, snapshotName) .setIndices("*") .execute(new ActionListener<>() { @Override @@ -1557,7 +1573,7 @@ public void testIndexNotFoundExceptionLogging() { .newForked( l -> client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(l) @@ -1566,7 +1582,7 @@ public void testIndexNotFoundExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setIndices(indexName) .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @@ -1609,7 +1625,7 @@ public void testIllegalArgumentExceptionLogging() { .newForked( l -> client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(l) @@ -1618,7 +1634,7 @@ public void testIllegalArgumentExceptionLogging() { .andThen( (l, ignored) -> client().admin() .cluster() - .prepareCreateSnapshot(repoName, randomIdentifier()) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, randomIdentifier()) .setFeatureStates("none", "none") .setWaitForCompletion(randomBoolean()) .execute(new ActionListener<>() { @@ -1663,7 +1679,7 @@ private 
SubscribableListener createRepoAndIndex(String repo client().admin() .cluster() - .preparePutRepository(repoName) + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) .setType(FsRepository.TYPE) .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) .execute(createRepositoryListener); diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java index ffbc7e62f1ca8..4eba899a4fb15 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java @@ -43,14 +43,14 @@ public void testSerialization() throws IOException { } public void testCreateSnapshotRequestDescrptions() { - CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(); + CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT); createSnapshotRequest.snapshot("snapshot_name"); createSnapshotRequest.repository("repo_name"); assertEquals("snapshot [repo_name:snapshot_name]", createSnapshotRequest.getDescription()); } public void testRestoreSnapshotRequestDescrptions() { - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(); + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT); restoreSnapshotRequest.snapshot("snapshot_name"); restoreSnapshotRequest.repository("repo_name"); assertEquals("snapshot [repo_name:snapshot_name]", restoreSnapshotRequest.getDescription()); diff --git a/test/external-modules/jvm-crash/build.gradle b/test/external-modules/jvm-crash/build.gradle new file mode 100644 index 0000000000000..7269f6aa9b995 --- /dev/null +++ b/test/external-modules/jvm-crash/build.gradle @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +import org.elasticsearch.gradle.internal.info.BuildParams + +apply plugin: 'elasticsearch.internal-java-rest-test' +// Necessary to use tests in Serverless +apply plugin: 'elasticsearch.internal-test-artifact' + +group = 'org.elasticsearch.plugin' + +esplugin { + description 'A test module that can trigger A JVM crash' + classname 'org.elasticsearch.test.jvm_crash.JvmCrashPlugin' +} + +tasks.named('javaRestTest') { + usesDefaultDistribution() + it.onlyIf("snapshot build") { BuildParams.isSnapshotBuild() } +} diff --git a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java new file mode 100644 index 0000000000000..3e73310ee824f --- /dev/null +++ b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/JvmCrashIT.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.jvm_crash; + +import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.AbstractLocalClusterSpecBuilder; +import org.elasticsearch.test.cluster.local.DefaultEnvironmentProvider; +import org.elasticsearch.test.cluster.local.DefaultLocalClusterFactory; +import org.elasticsearch.test.cluster.local.DefaultLocalElasticsearchCluster; +import org.elasticsearch.test.cluster.local.DefaultSettingsProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.local.distribution.LocalDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.ReleasedDistributionResolver; +import org.elasticsearch.test.cluster.local.distribution.SnapshotDistributionResolver; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; +import org.junit.AfterClass; +import org.junit.ClassRule; + +import java.io.BufferedReader; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.PrintStream; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.regex.Pattern; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.matchesRegex; +import static org.hamcrest.Matchers.not; + +public class JvmCrashIT extends ESRestTestCase { + + private static class StdOutCatchingClusterBuilder extends AbstractLocalClusterSpecBuilder { + + private StdOutCatchingClusterBuilder() { + this.settings(new DefaultSettingsProvider()); + this.environment(new DefaultEnvironmentProvider()); + } + + 
@Override + public ElasticsearchCluster build() { + // redirect stdout before the nodes start up + // they are referenced directly by ProcessUtils, so can't be changed afterwards + redirectStdout(); + + return new DefaultLocalElasticsearchCluster<>( + this::buildClusterSpec, + new DefaultLocalClusterFactory( + new LocalDistributionResolver(new SnapshotDistributionResolver(new ReleasedDistributionResolver())) + ) + ); + } + } + + private static PrintStream originalOut; + private static ByteArrayOutputStream stdOutput; + + private static void redirectStdout() { + if (originalOut == null) { + originalOut = System.out; + stdOutput = new ByteArrayOutputStream(); + // this duplicates the crash messages, but not the log output. That's ok. + System.setOut(new TeePrintStream(originalOut, stdOutput)); + } + } + + @ClassRule + public static ElasticsearchCluster cluster = new StdOutCatchingClusterBuilder().distribution(DistributionType.INTEG_TEST) + .nodes(1) + .module("test-jvm-crash") + .setting("xpack.security.enabled", "false") + .jvmArg("-Djvm.crash=true") + .build(); + + @AfterClass + public static void resetStdout() { + if (originalOut != null) { + System.setOut(originalOut); + } + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testJvmCrash() throws Exception { + final long pid = getElasticsearchPid(); + assertJvmArgs(pid, containsString("-Djvm.crash=true")); + + expectThrows(IOException.class, () -> client().performRequest(new Request("GET", "/_crash"))); + + // the Elasticsearch process should die + assertBusy(() -> assertJvmArgs(pid, not(containsString("-Djvm.crash=true")))); + + // parse the logs and ensure that Elasticsearch died with the expected cause + assertThat( + stdOutput, + hasToString( + matchesRegex( + Pattern.compile(".*# A fatal error has been detected by the Java Runtime Environment:.*SIGSEGV.*", Pattern.DOTALL) + ) + ) + ); + } + + private Process startJcmd(long pid) throws 
IOException { + final String jcmdPath = PathUtils.get(System.getProperty("java.home"), "bin/jcmd").toString(); + return new ProcessBuilder().command(jcmdPath, Long.toString(pid), "VM.command_line").redirectErrorStream(true).start(); + } + + private void assertJvmArgs(long pid, Matcher matcher) throws IOException, InterruptedException { + Process jcmdProcess = startJcmd(pid); + + if (Constants.WINDOWS) { + // jcmd on windows appears to have a subtle bug where if the process being connected to + // dies while jcmd is running, it can hang indefinitely. Here we detect this case by + // waiting a fixed amount of time, and then killing/retrying the process + boolean exited = jcmdProcess.waitFor(10, TimeUnit.SECONDS); + if (exited == false) { + logger.warn("jcmd hung, killing process and retrying"); + jcmdProcess.destroyForcibly(); + jcmdProcess = startJcmd(pid); + } + } + + List outputLines = readLines(jcmdProcess.getInputStream()); + + String jvmArgs = outputLines.stream().filter(l -> l.startsWith("jvm_args")).findAny().orElse(null); + try { + assertThat(jvmArgs, matcher); + } catch (AssertionError ae) { + logger.error("Failed matcher for jvm pid " + pid); + logger.error("jcmd output: " + String.join("\n", outputLines)); + throw ae; + } + } + + private long getElasticsearchPid() throws IOException { + Response response = client().performRequest(new Request("GET", "/_nodes/process")); + @SuppressWarnings("unchecked") + var nodesInfo = (Map) entityAsMap(response).get("nodes"); + @SuppressWarnings("unchecked") + var nodeInfo = (Map) nodesInfo.values().iterator().next(); + @SuppressWarnings("unchecked") + var processInfo = (Map) nodeInfo.get("process"); + Object stringPid = processInfo.get("id"); + return Long.parseLong(stringPid.toString()); + } + + private List readLines(InputStream is) throws IOException { + try (BufferedReader in = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { + return in.lines().toList(); + } + } + + @Override + protected 
boolean preserveClusterUponCompletion() { + // as the cluster is dead its state can not be wiped successfully so we have to bypass wiping the cluster + return true; + } +} diff --git a/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java new file mode 100644 index 0000000000000..9593dbf387d62 --- /dev/null +++ b/test/external-modules/jvm-crash/src/javaRestTest/java/org/elasticsearch/test/jvm_crash/TeePrintStream.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.jvm_crash; + +import java.io.OutputStream; +import java.io.PrintStream; + +/** + * Copies output to another {@code PrintStream}, as well as an {@code OutputStream} + */ +class TeePrintStream extends PrintStream { + private final PrintStream delegate; + + TeePrintStream(PrintStream delegate, OutputStream out) { + super(out); + this.delegate = delegate; + } + + @Override + public void flush() { + delegate.flush(); + super.flush(); + } + + @Override + public void close() { + delegate.close(); + super.close(); + } + + @Override + public boolean checkError() { + return delegate.checkError() || super.checkError(); + } + + @Override + public void write(int b) { + delegate.write(b); + super.write(b); + } + + @Override + public void write(byte[] buf, int off, int len) { + delegate.write(buf, off, len); + super.write(buf, off, len); + } + + @Override + public void print(boolean b) { + delegate.print(b); + super.print(b); + } + + @Override + public void print(char c) { + delegate.print(c); + 
super.print(c); + } + + @Override + public void print(int i) { + delegate.print(i); + super.print(i); + } + + @Override + public void print(long l) { + delegate.print(l); + super.print(l); + } + + @Override + public void print(float f) { + delegate.print(f); + super.print(f); + } + + @Override + public void print(double d) { + delegate.print(d); + super.print(d); + } + + @Override + public void print(char[] s) { + delegate.print(s); + super.print(s); + } + + @Override + public void print(String s) { + delegate.print(s); + super.print(s); + } + + @Override + public void print(Object obj) { + delegate.print(obj); + super.print(obj); + } + + @Override + public void println() { + delegate.println(); + super.println(); + } +} diff --git a/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java new file mode 100644 index 0000000000000..2bc4d969cba59 --- /dev/null +++ b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/JvmCrashPlugin.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.jvm_crash; + +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.features.NodeFeature; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; + +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class JvmCrashPlugin extends Plugin implements ActionPlugin { + @Override + public List getRestHandlers( + Settings settings, + NamedWriteableRegistry namedWriteableRegistry, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster, + Predicate clusterSupportsFeature + ) { + return List.of(new RestJvmCrashAction()); + } +} diff --git a/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java new file mode 100644 index 0000000000000..ec9fceaea9c6f --- /dev/null +++ b/test/external-modules/jvm-crash/src/main/java/org/elasticsearch/test/jvm_crash/RestJvmCrashAction.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.jvm_crash; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.security.AccessController; +import java.security.PrivilegedExceptionAction; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestJvmCrashAction implements RestHandler { + + // Turns out, it's actually quite hard to get the JVM to crash... + private static Method FREE_MEMORY; + private static Object UNSAFE; + static { + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + Class unsafe = Class.forName("sun.misc.Unsafe"); + + FREE_MEMORY = unsafe.getMethod("freeMemory", long.class); + Field f = unsafe.getDeclaredField("theUnsafe"); + f.setAccessible(true); + UNSAFE = f.get(null); + return null; + }); + } catch (Exception e) { + throw new AssertionError(e); + } + } + + RestJvmCrashAction() {} + + @Override + public List routes() { + return List.of(new Route(GET, "/_crash")); + } + + @Override + public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { + // BIG BADDA BOOM + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> FREE_MEMORY.invoke(UNSAFE, 1L)); + } catch (Exception e) { + throw new AssertionError(e); + } + } +} diff --git a/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy b/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy new file mode 100644 index 0000000000000..860ae72b058db --- /dev/null +++ 
b/test/external-modules/jvm-crash/src/main/plugin-metadata/plugin-security.policy @@ -0,0 +1,6 @@ +grant { + // various permissions to fiddle with Unsafe + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; +}; diff --git a/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java new file mode 100644 index 0000000000000..06b468250bd8f --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryPointCountVisitor.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.geo; + +import org.elasticsearch.geometry.Circle; +import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.GeometryVisitor; +import org.elasticsearch.geometry.Line; +import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; +import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.MultiPolygon; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.Polygon; +import org.elasticsearch.geometry.Rectangle; + +public class GeometryPointCountVisitor implements GeometryVisitor { + + @Override + public Integer visit(Circle circle) throws RuntimeException { + return 2; + } + + @Override + public Integer visit(GeometryCollection collection) throws RuntimeException { + int size = 0; + for (Geometry geometry : collection) { + size += geometry.visit(this); + } + return size; + } + + @Override + public Integer visit(Line line) throws RuntimeException { + return line.length(); + } + + @Override + public Integer visit(LinearRing ring) throws RuntimeException { + return ring.length(); + } + + @Override + public Integer visit(MultiLine multiLine) throws RuntimeException { + return visit((GeometryCollection) multiLine); + } + + @Override + public Integer visit(MultiPoint multiPoint) throws RuntimeException { + return multiPoint.size(); + } + + @Override + public Integer visit(MultiPolygon multiPolygon) throws RuntimeException { + return visit((GeometryCollection) multiPolygon); + } + + @Override + public Integer visit(Point point) throws RuntimeException { + return 1; + } + + @Override + public Integer visit(Polygon polygon) throws RuntimeException { + int size = polygon.getPolygon().length(); + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + size += polygon.getHole(i).length(); + } + return size; + } + + @Override + public Integer visit(Rectangle rectangle) throws RuntimeException { + 
return 4; + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java index ab15204a6095c..1c017a9da18ee 100644 --- a/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/GeometryTestUtils.java @@ -205,6 +205,11 @@ public static Geometry randomGeometry(boolean hasAlt) { return randomGeometry(0, hasAlt); } + public static Geometry randomGeometry(boolean hasAlt, int maxPoints) { + var pointCounter = new GeometryPointCountVisitor(); + return randomValueOtherThanMany(g -> g.visit(pointCounter) > maxPoints, () -> randomGeometry(0, hasAlt)); + } + protected static Geometry randomGeometry(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( diff --git a/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java index 1e21ad1acfd08..4918bd92fdfee 100644 --- a/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/geo/ShapeTestUtils.java @@ -184,6 +184,11 @@ public static Geometry randomGeometry(boolean hasAlt) { return randomGeometry(0, hasAlt); } + public static Geometry randomGeometry(boolean hasAlt, int maxPoints) { + var pointCounter = new GeometryPointCountVisitor(); + return randomValueOtherThanMany(g -> g.visit(pointCounter) > maxPoints, () -> randomGeometry(0, hasAlt)); + } + protected static Geometry randomGeometry(int level, boolean hasAlt) { @SuppressWarnings("unchecked") Function geometry = ESTestCase.randomFrom( diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 0486022620398..a6b737f162547 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -815,7 +815,7 @@ private void roundTripSyntheticSource(DocumentMapper mapper, String syntheticSou } } - private static String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { + protected static String syntheticSource(DocumentMapper mapper, IndexReader reader, int docId) throws IOException { LeafReader leafReader = getOnlyLeafReader(reader); final String synthetic1; diff --git a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java index 97f17858e753d..c6c9f5b727980 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/recovery/AbstractIndexRecoveryIntegTestCase.java @@ -470,7 +470,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) // create repo assertAcked( - clusterAdmin().preparePutRepository(REPO_NAME) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO_NAME) .setType("fs") .setSettings( Settings.builder() @@ -481,7 +481,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) ); // create snapshot - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(REPO_NAME, SNAP_NAME) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO_NAME, SNAP_NAME) .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -492,7 +492,7 @@ private static void createSnapshotThatCanBeUsedDuringRecovery(String indexName) ); assertThat( - 
clusterAdmin().prepareGetSnapshots(REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO_NAME).setSnapshots(SNAP_NAME).get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS) ); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index 7cdeaeedfdeaf..faada33eade83 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -71,7 +71,7 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { deleteAndAssertEmpty(getRepository().basePath()); - clusterAdmin().prepareDeleteRepository(TEST_REPO_NAME).get(); + clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, TEST_REPO_NAME).get(); super.tearDown(); } @@ -105,10 +105,11 @@ public void testCreateSnapshot() { final String snapshotName = "test-snap-" + System.currentTimeMillis(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, snapshotName) - .setWaitForCompletion(true) - .setIndices("test-idx-*", "-test-idx-3") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + snapshotName + ).setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -116,11 +117,16 @@ public void testCreateSnapshot() { ); assertThat( - 
clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).setSnapshots(snapshotName).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), equalTo(SnapshotState.SUCCESS) ); - assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, snapshotName).get().isAcknowledged()); + assertTrue(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, snapshotName).get().isAcknowledged()); } public void testListChildren() { @@ -178,10 +184,11 @@ public void testCleanup() throws Exception { final String snapshotName = "test-snap-" + System.currentTimeMillis(); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, snapshotName) - .setWaitForCompletion(true) - .setIndices("test-idx-*", "-test-idx-3") - .get(); + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + snapshotName + ).setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat( createSnapshotResponse.getSnapshotInfo().successfulShards(), @@ -189,7 +196,12 @@ public void testCleanup() throws Exception { ); assertThat( - clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).setSnapshots(snapshotName).get().getSnapshots().get(0).state(), + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME) + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), equalTo(SnapshotState.SUCCESS) ); @@ -201,7 +213,7 @@ public void testCleanup() throws Exception { createDanglingIndex(repo, genericExec); logger.info("--> deleting a snapshot to trigger repository cleanup"); - clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, snapshotName).get(); + 
clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, snapshotName).get(); BlobStoreTestUtil.assertConsistency(repo); @@ -209,7 +221,11 @@ public void testCleanup() throws Exception { createDanglingIndex(repo, genericExec); logger.info("--> Execute repository cleanup"); - final CleanupRepositoryResponse response = clusterAdmin().prepareCleanupRepository(TEST_REPO_NAME).get(); + final CleanupRepositoryResponse response = clusterAdmin().prepareCleanupRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME + ).get(); assertCleanupResponse(response, 3L, 1L); } @@ -228,23 +244,35 @@ public void testIndexLatest() throws Exception { final var repository = getRepository(); final var blobContents = new HashSet(); - final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + final var createSnapshot1Response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, randomIdentifier()) .setWaitForCompletion(true) .get(); assertTrue(blobContents.add(readIndexLatest(repository))); - clusterAdmin().prepareGetSnapshots(TEST_REPO_NAME).get(); + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME).get(); assertFalse(blobContents.add(readIndexLatest(repository))); - final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REPO_NAME, randomIdentifier()) + final var createSnapshot2Response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, TEST_REPO_NAME, randomIdentifier()) .setWaitForCompletion(true) .get(); assertTrue(blobContents.add(readIndexLatest(repository))); - assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot1Response.getSnapshotInfo().snapshotId().getName())); + assertAcked( + clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + createSnapshot1Response.getSnapshotInfo().snapshotId().getName() + ) + ); 
assertTrue(blobContents.add(readIndexLatest(repository))); - assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REPO_NAME, createSnapshot2Response.getSnapshotInfo().snapshotId().getName())); + assertAcked( + clusterAdmin().prepareDeleteSnapshot( + TEST_REQUEST_TIMEOUT, + TEST_REPO_NAME, + createSnapshot2Response.getSnapshotInfo().snapshotId().getName() + ) + ); assertTrue(blobContents.add(readIndexLatest(repository))); } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index b1765218ff7f2..6951e1941686d 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -101,7 +101,12 @@ protected final String createRepository(final String name, final boolean verify) protected final String createRepository(final String name, final Settings settings, final boolean verify) { logger.info("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); - assertAcked(clusterAdmin().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) + .setType(repositoryType()) + .setVerify(verify) + .setSettings(settings) + ); internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); @@ -116,7 +121,7 @@ protected final String createRepository(final String name, final Settings settin protected final void deleteRepository(final String name) { logger.debug("--> deleting repository [name: {}]", name); - assertAcked(clusterAdmin().prepareDeleteRepository(name)); + 
assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name)); internalCluster().getDataOrMasterNodeInstances(RepositoriesService.class).forEach(repositories -> { RepositoryMissingException e = expectThrows(RepositoryMissingException.class, () -> repositories.repository(name)); assertThat(e.repository(), equalTo(name)); @@ -316,7 +321,9 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexNames) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(indexNames) ); List deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames); @@ -351,7 +358,9 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t } logger.info("--> restore all indices from the snapshot"); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).setWaitForCompletion(true) + ); // higher timeout since we can have quite a few shards and a little more data here ensureGreen(TimeValue.timeValueSeconds(120)); @@ -361,15 +370,23 @@ protected void testSnapshotAndRestore(boolean recreateRepositoryBeforeRestore) t } logger.info("--> delete snapshot {}:{}", repoName, snapshotName); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get()); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get()); + expectThrows( + 
SnapshotMissingException.class, + () -> clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName).setSnapshots(snapshotName).get() + ); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get()); + expectThrows( + SnapshotMissingException.class, + () -> clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).get() + ); expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get() + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(randomBoolean()) + .get() ); } @@ -395,7 +412,9 @@ public void testMultipleSnapshotAndRollback() throws Exception { docCounts[i] = (int) SearchResponseUtils.getTotalHitsValue(prepareSearch(indexName).setSize(0)); logger.info("--> create snapshot {}:{} with {} documents", repoName, snapshotName + "-" + i, docCounts[i]); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName + "-" + i).setWaitForCompletion(true).setIndices(indexName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + i) + .setWaitForCompletion(true) + .setIndices(indexName) ); } @@ -411,7 +430,8 @@ public void testMultipleSnapshotAndRollback() throws Exception { logger.info("--> restore index from the snapshot"); assertSuccessfulRestore( - clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName + "-" + iterationToRestore).setWaitForCompletion(true) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + iterationToRestore) + .setWaitForCompletion(true) ); ensureGreen(); @@ -420,7 +440,7 @@ public void testMultipleSnapshotAndRollback() throws Exception { for (int i = 0; i < iterationCount; i++) { logger.info("--> delete snapshot {}:{}", repoName, snapshotName + "-" + i); - 
assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName + "-" + i).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName + "-" + i).get()); } } @@ -451,7 +471,7 @@ public void testIndicesDeletedFromRepository() throws Exception { logger.info("--> take a snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap") .setWaitForCompletion(true) .get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); @@ -466,14 +486,14 @@ public void testIndicesDeletedFromRepository() throws Exception { logger.info("--> take another snapshot with only 2 of the 3 indices"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "test-snap2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap2") .setWaitForCompletion(true) .setIndices("test-idx-1", "test-idx-2") .get(); assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards()); logger.info("--> delete a snapshot"); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "test-snap").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap").get()); logger.info("--> verify index folder deleted from blob container"); RepositoriesService repositoriesSvc = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); @@ -493,7 +513,7 @@ public void testIndicesDeletedFromRepository() throws Exception { } } - assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "test-snap2").get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "test-snap2").get()); } public void testBlobStoreBulkDeletion() throws Exception { @@ -541,7 
+561,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { SnapshotState.SUCCESS, client.admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1") .setWaitForCompletion(true) .get() .getSnapshotInfo() @@ -571,14 +591,14 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { ); final var snapshot2Info = client.admin() .cluster() - .prepareCreateSnapshot(repoName, "snapshot-2") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2") .setWaitForCompletion(true) .get() .getSnapshotInfo(); assertEquals(SnapshotState.SUCCESS, snapshot2Info.state()); // Delete the first snapshot, which should leave only the blobs from snapshot-2 - assertAcked(client.admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-1")); + assertAcked(client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-1")); // Retrieve the blobs actually present final var actualBlobs = shardContainer.listBlobs(randomPurpose()) @@ -616,7 +636,7 @@ public void testDanglingShardLevelBlobCleanup() throws Exception { ); } - assertAcked(client.admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-2")); + assertAcked(client.admin().cluster().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snapshot-2")); } protected void addRandomDocuments(String name, int numDocs) throws InterruptedException { diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java index 43b0fb7025bd8..b028659eb8d46 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESFsBasedRepositoryIntegTestCase.java @@ -51,11 +51,13 @@ public void 
testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final String snapshotName = randomName(); logger.info("--> create snapshot {}:{}", repoName, snapshotName); assertSuccessfulSnapshot( - clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setIndices(indexName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(true) + .setIndices(indexName) ); assertAcked(client().admin().indices().prepareDelete(indexName)); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); final Path deletedPath; try (Stream contents = Files.list(repoPath.resolve("indices"))) { @@ -69,7 +71,9 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final ElasticsearchException exception = expectThrows( ElasticsearchException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get() + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) + .setWaitForCompletion(randomBoolean()) + .get() ); assertThat(exception.getRootCause(), instanceOf(NoSuchFileException.class)); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java index 22bbfad3cfb70..a79ba296e7554 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESMockAPIBasedRepositoryIntegTestCase.java @@ -169,15 +169,19 @@ public final void testSnapshotWithLargeSegmentFiles() throws Exception { assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), 
nbDocs); final String snapshot = "snapshot"; - assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)); + assertSuccessfulSnapshot( + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); assertAcked(client().admin().indices().prepareDelete(index)); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true) + ); ensureGreen(index); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get()); } public void testRequestStats() throws Exception { @@ -198,15 +202,19 @@ public void testRequestStats() throws Exception { assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); final String snapshot = "snapshot"; - assertSuccessfulSnapshot(clusterAdmin().prepareCreateSnapshot(repository, snapshot).setWaitForCompletion(true).setIndices(index)); + assertSuccessfulSnapshot( + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true).setIndices(index) + ); assertAcked(client().admin().indices().prepareDelete(index)); - assertSuccessfulRestore(clusterAdmin().prepareRestoreSnapshot(repository, snapshot).setWaitForCompletion(true)); + assertSuccessfulRestore( + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).setWaitForCompletion(true) + ); ensureGreen(index); assertHitCount(prepareSearch(index).setSize(0).setTrackTotalHits(true), nbDocs); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repository, snapshot).get()); + 
assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot).get()); final RepositoryStats repositoryStats = StreamSupport.stream( internalCluster().getInstances(RepositoriesService.class).spliterator(), diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 2e7ce0400d78b..23ea4cc95fa35 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -144,11 +144,11 @@ public void verifyNoLeakedListeners() throws Exception { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - clusterAdmin().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT).get().repositories().forEach(repositoryMetadata -> { final String name = repositoryMetadata.name(); if (repositoryMetadata.settings().getAsBoolean(READONLY_SETTING_KEY, false) == false) { - clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); - clusterAdmin().prepareCleanupRepository(name).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); + clusterAdmin().prepareCleanupRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name).get(); } BlobStoreTestUtil.assertConsistency(getRepositoryOnMaster(name)); }); @@ -318,7 +318,12 @@ protected void createRepository(String repoName, String type, Settings.Builder s public static void createRepository(Logger logger, String repoName, String type, Settings.Builder settings, boolean verify) { logger.info("--> creating or updating repository [{}] [{}]", repoName, type); - 
assertAcked(clusterAdmin().preparePutRepository(repoName).setVerify(verify).setType(type).setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName) + .setVerify(verify) + .setType(type) + .setSettings(settings) + ); } protected void createRepository(String repoName, String type, Settings.Builder settings) { @@ -342,7 +347,7 @@ public static void createRepository(Logger logger, String repoName, String type) } protected void deleteRepository(String repoName) { - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); } public static Settings.Builder randomRepositorySettings() { @@ -381,10 +386,11 @@ protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) t protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVersion version) throws Exception { assertThat("This hack only works on an empty repository", getRepositoryData(repoName).getSnapshotIds(), empty()); final String oldVersionSnapshot = OLD_VERSION_SNAPSHOT_PREFIX + version.id(); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, oldVersionSnapshot) - .setIndices("does-not-exist-for-sure-*") - .setWaitForCompletion(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + repoName, + oldVersionSnapshot + ).setIndices("does-not-exist-for-sure-*").setWaitForCompletion(true).get(); final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); assertThat(snapshotInfo.totalShards(), is(0)); @@ -442,7 +448,7 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVe final RepositoryMetadata repoMetadata = blobStoreRepository.getMetadata(); if (BlobStoreRepository.CACHE_REPOSITORY_DATA.get(repoMetadata.settings())) { 
logger.info("--> recreating repository to clear caches"); - assertAcked(clusterAdmin().prepareDeleteRepository(repoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repoName)); createRepository(repoName, repoMetadata.type(), Settings.builder().put(repoMetadata.settings())); } return oldVersionSnapshot; @@ -454,7 +460,7 @@ protected SnapshotInfo createFullSnapshot(String repoName, String snapshotName) public static SnapshotInfo createFullSnapshot(Logger logger, String repoName, String snapshotName) { logger.info("--> creating full snapshot [{}] in [{}]", snapshotName, repoName); - CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(repoName, snapshotName) + CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName) .setIncludeGlobalState(true) .setWaitForCompletion(true) .get(); @@ -466,7 +472,7 @@ public static SnapshotInfo createFullSnapshot(Logger logger, String repoName, St protected SnapshotInfo createSnapshot(String repositoryName, String snapshot, List indices, List featureStates) { logger.info("--> creating snapshot [{}] of {} in [{}]", snapshot, indices, repositoryName); - final CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(repositoryName, snapshot) + final CreateSnapshotResponse response = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot) .setIndices(indices.toArray(Strings.EMPTY_ARRAY)) .setWaitForCompletion(true) .setFeatureStates(featureStates.toArray(Strings.EMPTY_ARRAY)) @@ -604,7 +610,10 @@ public static ActionFuture startFullSnapshot( boolean partial ) { logger.info("--> creating full snapshot [{}] to repo [{}]", snapshotName, repoName); - return clusterAdmin().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).setPartial(partial).execute(); + return clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, 
repoName, snapshotName) + .setWaitForCompletion(true) + .setPartial(partial) + .execute(); } protected void awaitNumberOfSnapshotsInProgress(int count) throws Exception { @@ -642,12 +651,15 @@ protected void createIndexWithContent(String indexName, Settings indexSettings) protected ActionFuture startDeleteSnapshot(String repoName, String snapshotName) { logger.info("--> deleting snapshot [{}] from repo [{}]", snapshotName, repoName); - return clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute(); + return clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotName).execute(); } protected ActionFuture startDeleteSnapshots(String repoName, List snapshotNames, String viaNode) { logger.info("--> deleting snapshots {} from repo [{}]", snapshotNames, repoName); - return client(viaNode).admin().cluster().prepareDeleteSnapshot(repoName, snapshotNames.toArray(Strings.EMPTY_ARRAY)).execute(); + return client(viaNode).admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshotNames.toArray(Strings.EMPTY_ARRAY)) + .execute(); } protected static void updateClusterState(final Function updater) throws Exception { @@ -673,7 +685,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) } protected SnapshotInfo getSnapshot(String repository, String snapshot) { - final List snapshotInfos = clusterAdmin().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots(); + final List snapshotInfos = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository) + .setSnapshots(snapshot) + .get() + .getSnapshots(); assertThat(snapshotInfos, hasSize(1)); return snapshotInfos.get(0); } @@ -704,16 +719,18 @@ public static List createNSnapshots(Logger logger, String repoName, int final String snapshot = prefix + i; snapshotNames.add(snapshot); final Map userMetadata = randomUserMetadata(); - clusterAdmin().prepareCreateSnapshot(repoName, snapshot) + 
clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repoName, snapshot) .setWaitForCompletion(true) .setUserMetadata(userMetadata) .execute(snapshotsListener.delegateFailure((l, response) -> { final SnapshotInfo snapshotInfoInResponse = response.getSnapshotInfo(); assertEquals(userMetadata, snapshotInfoInResponse.userMetadata()); - clusterAdmin().prepareGetSnapshots(repoName).setSnapshots(snapshot).execute(l.safeMap(getResponse -> { - assertEquals(snapshotInfoInResponse, getResponse.getSnapshots().get(0)); - return response; - })); + clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repoName) + .setSnapshots(snapshot) + .execute(l.safeMap(getResponse -> { + assertEquals(snapshotInfoInResponse, getResponse.getSnapshots().get(0)); + return response; + })); })); } for (CreateSnapshotResponse snapshotResponse : allSnapshotsDone.get()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java index 374854626703d..73acdb6e19d4b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java @@ -218,22 +218,25 @@ public void wipeRepositories(String... 
repositories) { for (String repository : repositories) { ActionListener.run( listeners.acquire(), - l -> client().admin().cluster().prepareDeleteRepository(repository).execute(new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - l.onResponse(null); - } - - @Override - public void onFailure(Exception e) { - if (e instanceof RepositoryMissingException) { - // ignore + l -> client().admin() + .cluster() + .prepareDeleteRepository(ESTestCase.TEST_REQUEST_TIMEOUT, ESTestCase.TEST_REQUEST_TIMEOUT, repository) + .execute(new ActionListener<>() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { l.onResponse(null); - } else { - l.onFailure(e); } - } - }) + + @Override + public void onFailure(Exception e) { + if (e instanceof RepositoryMissingException) { + // ignore + l.onResponse(null); + } else { + l.onFailure(e); + } + } + }) ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 3f0f6c91443ad..9e88d66c430e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -2024,7 +2024,7 @@ protected static void registerRepository(RestClient restClient, String repositor final Request request = newXContentRequest( HttpMethod.PUT, "/_snapshot/" + repository, - new PutRepositoryRequest(repository).type(type).settings(settings) + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repository).type(type).settings(settings) ); request.addParameter("verify", Boolean.toString(verify)); diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java index 
bb7eba340c0ad..22210361f4430 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/AutoscalingSnapshotsIT.java @@ -36,7 +36,11 @@ public class AutoscalingSnapshotsIT extends AutoscalingIntegTestCase { public void setup() throws Exception { Path location = randomRepoPath(); logger.info("--> creating repository [{}] [{}]", REPO, "fs"); - assertAcked(clusterAdmin().preparePutRepository(REPO).setType("fs").setSettings(Settings.builder().put("location", location))); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, REPO) + .setType("fs") + .setSettings(Settings.builder().put("location", location)) + ); } public void testAutoscalingPolicyWillNotBeRestored() { @@ -47,7 +51,7 @@ public void testAutoscalingPolicyWillNotBeRestored() { CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(REPO, SNAPSHOT) + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setIncludeGlobalState(true) .get(); @@ -71,7 +75,7 @@ public void testAutoscalingPolicyWillNotBeRestored() { RestoreSnapshotResponse restoreSnapshotResponse = client.admin() .cluster() - .prepareRestoreSnapshot(REPO, SNAPSHOT) + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT) .setWaitForCompletion(true) .setRestoreGlobalState(true) .get(); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 90bbc29a11b41..423d555de9eab 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -154,7 +154,11 @@ 
public void testThatRepositoryRecoversEmptyIndexBasedOnLeaderSettings() throws I Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -225,7 +229,11 @@ public void testDocsAreRecovered() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -292,7 +300,11 @@ public void testRateLimitingIsEmployed() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -357,7 +369,11 @@ public void 
testIndividualActionsTimeout() throws Exception { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -420,7 +436,11 @@ public void testFollowerMappingIsUpdated() throws IOException { Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices(leaderIndex) + RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + TEST_REQUEST_TIMEOUT, + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) @@ -573,7 +593,8 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() }; clusterService.addListener(listener); - final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderCluster, CcrRepository.LATEST).indices(leaderIndex) + final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, leaderCluster, CcrRepository.LATEST) + .indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") .renameReplacement(followerIndex) diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java 
b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index a0b25faea9256..716554eb3927c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -151,7 +151,7 @@ private RestoreSnapshotRequest setUpRestoreSnapshotRequest( final Settings.Builder settingsBuilder = Settings.builder() .put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, followerIndex) .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); - return new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indexSettings(settingsBuilder) + return new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, leaderClusterRepoName, CcrRepository.LATEST).indexSettings(settingsBuilder) .indices(leaderIndex) .indicesOptions(indicesOptions) .renamePattern("^(.*)$") diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java index 560c98fbd210b..a74aa4c323426 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutFollowAction.java @@ -187,13 +187,14 @@ private void createFollowerIndex( .build(); final String leaderClusterRepoName = CcrRepository.NAME_PREFIX + request.getRemoteCluster(); - final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest(leaderClusterRepoName, CcrRepository.LATEST).indices( - request.getLeaderIndex() - ) + final RestoreSnapshotRequest restoreRequest = new RestoreSnapshotRequest( + request.masterNodeTimeout(), + leaderClusterRepoName, + CcrRepository.LATEST + ).indices(request.getLeaderIndex()) .indicesOptions(request.indicesOptions()) .renamePattern("^(.*)$") 
.renameReplacement(Matcher.quoteReplacement(request.getFollowerIndex())) - .masterNodeTimeout(request.masterNodeTimeout()) .indexSettings(overrideSettings) .quiet(true); diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index c2b5080aa16c1..5c02288e704f7 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -356,7 +356,7 @@ private IndexRequestBuilder[] snapshotAndRestore(final String sourceIdx, final b assertFalse(clusterAdmin().prepareHealth().setTimeout(TimeValue.timeValueSeconds(30)).setWaitForNodes("2").get().isTimedOut()); logger.info("--> restore the index and ensure all shards are allocated"); - RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repo, snapshot) + RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repo, snapshot) .setWaitForCompletion(true) .setIndices(sourceIdx) .get(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java index 1377e9103b00b..3be40e5a1550d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java @@ -50,8 +50,7 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl } getClient().admin() .cluster() - .prepareDeleteSnapshot(repositoryName, snapshotName) - .setMasterNodeTimeout(TimeValue.MAX_VALUE) + .prepareDeleteSnapshot(TimeValue.MAX_VALUE, repositoryName, 
snapshotName) .execute(new ActionListener<>() { @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java index 7d6397b7c96dd..070bc804f3279 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java @@ -100,13 +100,12 @@ void createSnapshot(IndexMetadata indexMetadata, ActionListener listene ); return; } - CreateSnapshotRequest request = new CreateSnapshotRequest(snapshotRepository, snapshotName); + CreateSnapshotRequest request = new CreateSnapshotRequest(TimeValue.MAX_VALUE, snapshotRepository, snapshotName); request.indices(indexName); // this is safe as the snapshot creation will still be async, it's just that the listener will be notified when the snapshot is // complete request.waitForCompletion(true); request.includeGlobalState(false); - request.masterNodeTimeout(TimeValue.MAX_VALUE); getClient().admin().cluster().createSnapshot(request, listener.map(response -> { logger.debug( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java index 9c24324f706ca..7ce81fa90a557 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java @@ -104,7 +104,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, ); String snapshotName = snapPolicyMeta.getLastSuccess().getSnapshotName(); String repositoryName = snapPolicyMeta.getPolicy().getRepository(); - GetSnapshotsRequest request = new GetSnapshotsRequest().repositories(repositoryName) + GetSnapshotsRequest request = new 
GetSnapshotsRequest(TimeValue.MAX_VALUE).repositories(repositoryName) .snapshots(new String[] { snapshotName }) .includeIndexNames(true) .verbose(false); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 046ccc3037a05..09a49c53ee1a5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.XContentBuilder; @@ -399,6 +400,24 @@ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { Map transformedSource = filter(result.v2(), filter, 0); XContentBuilder xContentBuilder = XContentBuilder.builder(result.v1().xContent()).map(transformedSource); visitor.binaryField(fieldInfo, BytesReference.toBytes(BytesReference.bytes(xContentBuilder))); + } else if (IgnoredSourceFieldMapper.NAME.equals(fieldInfo.name)) { + // for _ignored_source, parse, filter out the field and its contents, and serialize back downstream + IgnoredSourceFieldMapper.MappedNameValue mappedNameValue = IgnoredSourceFieldMapper.decodeAsMap(value); + Map transformedField = filter(mappedNameValue.map(), filter, 0); + if (transformedField.isEmpty() == false) { + // The unfiltered map contains at least one element, the field name with its value. 
If the field contains + // an object or an array, the value of the first element is a map or a list, respectively. Otherwise, + // it's a single leaf value, e.g. a string or a number. + var topValue = mappedNameValue.map().values().iterator().next(); + if (topValue instanceof Map || topValue instanceof List) { + // The field contains an object or an array, reconstruct it from the transformed map in case + // any subfield has been filtered out. + visitor.binaryField(fieldInfo, IgnoredSourceFieldMapper.encodeFromMap(mappedNameValue, transformedField)); + } else { + // The field contains a leaf value, and it hasn't been filtered out. It is safe to propagate the original value. + visitor.binaryField(fieldInfo, value); + } + } } else { visitor.binaryField(fieldInfo, value); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 4dc0ea1d77e42..fb892a318f07c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -256,8 +256,8 @@ private Map addPolicyNameToMetadata(final Map me * Generate a new create snapshot request from this policy. The name of the snapshot is * generated at this time based on any date math expressions in the "name" field. */ - public CreateSnapshotRequest toRequest() { - CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(this.name)); + public CreateSnapshotRequest toRequest(TimeValue masterNodeTimeout) { + CreateSnapshotRequest req = new CreateSnapshotRequest(masterNodeTimeout, repository, generateSnapshotName(this.name)); Map mergedConfiguration = configuration == null ? 
new HashMap<>() : new HashMap<>(configuration); @SuppressWarnings("unchecked") Map metadata = (Map) mergedConfiguration.get("metadata"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index c2d51680c3146..560dee9b5843c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -48,6 +48,7 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.util.TestUtil; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Automata; @@ -59,12 +60,16 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.IOUtils; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; +import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions; import 
org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition; @@ -86,7 +91,7 @@ import static org.hamcrest.Matchers.equalTo; /** Simple tests for this filterreader */ -public class FieldSubsetReaderTests extends ESTestCase { +public class FieldSubsetReaderTests extends MapperServiceTestCase { /** * test filtering two string fields @@ -711,6 +716,127 @@ public void testSourceFilteringIntegration() throws Exception { IOUtils.close(ir, iw, dir); } + public void testIgnoredSourceFilteringIntegration() throws Exception { + DocumentMapper mapper = createMapperService( + Settings.builder() + .put("index.mapping.total_fields.limit", 1) + .put("index.mapping.total_fields.ignore_dynamic_beyond_limit", true) + .build(), + syntheticSourceMapping(b -> { + b.startObject("foo").field("type", "keyword").endObject(); + }) + ).documentMapper(); + + try (Directory directory = newDirectory()) { + RandomIndexWriter iw = indexWriterForSyntheticSource(directory); + ParsedDocument doc = mapper.parse(source(b -> { + b.field("fieldA", "testA"); + b.field("fieldB", "testB"); + b.startObject("obj").field("fieldC", "testC").endObject(); + b.startArray("arr").startObject().field("fieldD", "testD").endObject().endArray(); + })); + doc.updateSeqID(0, 0); + doc.version().setLongValue(0); + iw.addDocuments(doc.docs()); + iw.close(); + + { + Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals("{\"fieldA\":\"testA\"}", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("fieldA"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = 
FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}],"fieldB":"testB","obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + + { + Automaton automaton = Automatons.patterns(Arrays.asList("obj.fieldC", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("obj.fieldC"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}],"fieldA":"testA","fieldB":"testB"}""", syntheticSource); + } + } + + { + Automaton automaton = Automatons.patterns(Arrays.asList("arr.fieldD", IgnoredSourceFieldMapper.NAME)); + try ( + DirectoryReader indexReader = FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{"fieldD":"testD"}]}""", syntheticSource); + } + } + + { + Automaton automaton = Operations.minus( + Automata.makeAnyString(), + Automatons.patterns("arr.fieldD"), + Operations.DEFAULT_DETERMINIZE_WORK_LIMIT + ); + try ( + DirectoryReader indexReader = 
FieldSubsetReader.wrap( + wrapInMockESDirectoryReader(DirectoryReader.open(directory)), + new CharacterRunAutomaton(automaton) + ) + ) { + String syntheticSource = syntheticSource(mapper, indexReader, doc.docs().size() - 1); + assertEquals(""" + {"arr":[{}],"fieldA":"testA","fieldB":"testB","obj":{"fieldC":"testC"}}""", syntheticSource); + } + } + } + } + public void testSourceFiltering() { // include on top-level value Map map = new HashMap<>(); diff --git a/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json b/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json index 96fa641726fa3..40666ea28097b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/connector-secrets.json @@ -3,7 +3,6 @@ "index": { "auto_expand_replicas": "0-1", "number_of_shards": 1, - "number_of_replicas": 0, "priority": 100, "refresh_interval": "1s" } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json index 9aa9731be6524..70ba4c3664588 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/analytics/behavioral_analytics-events-settings.json @@ -8,7 +8,6 @@ }, "codec": "best_compression", "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "final_pipeline": "behavioral_analytics-events-final_pipeline", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json index 
22f35b3ac5c99..6ff9510574281 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/connector/elastic-connectors-settings.json @@ -3,8 +3,7 @@ "settings": { "hidden": true, "number_of_shards": "1", - "auto_expand_replicas": "0-1", - "number_of_replicas": "0" + "auto_expand_replicas": "0-1" } }, "_meta": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json index 0e82cc0f2a6df..4d307949c18db 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/idp/saml-service-provider-template.json @@ -8,7 +8,6 @@ "order": 100, "settings": { "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "index.priority": 10, "index.format": 1 diff --git a/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json b/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json index e549d3bb3d168..4f47d579f7eb4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/ilm-history.json @@ -8,7 +8,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1" }, "lifecycle": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json index 4e77d35b4de25..7d13712e9c371 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-alerts-7.json @@ -4,7 
+4,6 @@ "settings": { "index": { "number_of_shards": 1, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "format": 7, "codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json index d8d911a31baa9..50a5e7c15022a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-es.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.format": 7, "index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json index ad631b8e5762c..0e0a6f14e5206 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-kibana.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.format": 7, "index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json index ae48f9f552b51..e11627f93650d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/monitoring-logstash.json @@ -3,7 +3,6 @@ "version": ${xpack.monitoring.template.release.version}, "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.format": 7, 
"index.codec": "best_compression" diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index 70ffedb3f5462..9b90f97682306 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 4, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "max_result_window": 150000, "refresh_interval": "10s", diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json index 0f1c24d96c092..f1e5e01d50c16 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 353411ed80b2e..3d5e5d0fdc9b7 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", 
"refresh_interval": "10s", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json index e933aa117a6b3..35f53a36b2d0b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json @@ -2,7 +2,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "sort": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json index c28a548f95418..9e8a344d23959 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 16, - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json index 470edd710136d..6c96fb21673ae 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": 16, - "number_of_replicas": 0, 
"auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true, diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json index 48b88492a777d..9271718bd27ed 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json @@ -3,7 +3,6 @@ "settings": { "index": { "number_of_shards": "16", - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json index 72d8cf6e1dfc2..7e7229e83c823 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json @@ -5,7 +5,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json index 6f32af12c84bf..71c4d15989b7a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json @@ -5,7 +5,6 @@ "template": { "settings": { 
"index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json index d3c5b0af215e6..20849bfe8f27d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json @@ -5,7 +5,6 @@ "template": { "settings": { "index": { - "number_of_replicas": 0, "auto_expand_replicas": "0-1", "refresh_interval": "10s", "hidden": true diff --git a/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json b/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json index c154cdfe19d66..e3b13f3f8c841 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/slm-history.json @@ -9,7 +9,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1" }, "lifecycle": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json index d8b5ea75d88c4..2eed69c7c58e6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history-no-ilm.json @@ -7,7 +7,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.hidden": true, "index.format": 6 diff --git a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json 
b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json index 79b0c6fb228bd..19e4dc022daa1 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/watch-history.json @@ -7,7 +7,6 @@ "template": { "settings": { "index.number_of_shards": 1, - "index.number_of_replicas": 0, "index.auto_expand_replicas": "0-1", "index.lifecycle.name": "watch-history-ilm-policy-16", "index.hidden": true, diff --git a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java index 9fa0320527f93..7c0b70c658e7a 100644 --- a/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java +++ b/x-pack/plugin/eql/qa/correctness/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDataLoader.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.LogConfigurator; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; @@ -68,13 +69,16 @@ static Properties loadConfiguration() throws IOException { } } + // timeout is not rendered in the JSON so doesn't matter + private static final TimeValue DUMMY_TIMEOUT = TimeValue.THIRTY_SECONDS; + static void restoreSnapshot(RestClient client, Properties cfg) throws IOException { int status = client.performRequest(new Request("HEAD", "/" + cfg.getProperty("index_name"))).getStatusLine().getStatusCode(); if (status == 404) { Request createRepo = new Request("PUT", "/_snapshot/" + cfg.getProperty("gcs_repo_name")); createRepo.setJsonEntity( Strings.toString( - new PutRepositoryRequest().type("gcs") + new PutRepositoryRequest(DUMMY_TIMEOUT, DUMMY_TIMEOUT).type("gcs") .settings( 
Settings.builder() .put("bucket", cfg.getProperty("gcs_bucket_name")) diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java index cf8ac05e14a1d..8d37feb37d8b6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorFactory.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedNumericDocValues; @@ -18,7 +17,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.Rounding; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefVector; import org.elasticsearch.compute.data.DocVector; @@ -29,8 +27,6 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Releasables; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import java.io.IOException; import java.io.UncheckedIOException; @@ -52,25 +48,21 @@ public class TimeSeriesSortedSourceOperatorFactory extends LuceneOperator.Factory { private final int maxPageSize; - private final TimeValue timeSeriesPeriod; private TimeSeriesSortedSourceOperatorFactory( List contexts, Function queryFunction, int taskConcurrency, int maxPageSize, - TimeValue timeSeriesPeriod, int limit ) { super(contexts, queryFunction, DataPartitioning.SHARD, taskConcurrency, limit, 
ScoreMode.COMPLETE_NO_SCORES); this.maxPageSize = maxPageSize; - this.timeSeriesPeriod = timeSeriesPeriod; } @Override public SourceOperator get(DriverContext driverContext) { - var rounding = timeSeriesPeriod.equals(TimeValue.ZERO) == false ? Rounding.builder(timeSeriesPeriod).build() : null; - return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit, rounding); + return new Impl(driverContext.blockFactory(), sliceQueue, maxPageSize, limit); } @Override @@ -82,18 +74,10 @@ public static TimeSeriesSortedSourceOperatorFactory create( int limit, int maxPageSize, int taskConcurrency, - TimeValue timeSeriesPeriod, List searchContexts, Function queryFunction ) { - return new TimeSeriesSortedSourceOperatorFactory( - searchContexts, - queryFunction, - taskConcurrency, - maxPageSize, - timeSeriesPeriod, - limit - ); + return new TimeSeriesSortedSourceOperatorFactory(searchContexts, queryFunction, taskConcurrency, maxPageSize, limit); } static final class Impl extends SourceOperator { @@ -101,20 +85,18 @@ static final class Impl extends SourceOperator { private final int maxPageSize; private final BlockFactory blockFactory; private final LuceneSliceQueue sliceQueue; - private final Rounding.Prepared rounding; private int currentPagePos = 0; private int remainingDocs; private boolean doneCollecting; private IntVector.Builder docsBuilder; private IntVector.Builder segmentsBuilder; private LongVector.Builder timestampsBuilder; - private LongVector.Builder intervalsBuilder; // TODO: add an ordinal block for tsid hashes // (This allows for efficiently grouping by tsid locally, no need to use bytes representation of tsid hash) private BytesRefVector.Builder tsHashesBuilder; private TimeSeriesIterator iterator; - Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit, Rounding rounding) { + Impl(BlockFactory blockFactory, LuceneSliceQueue sliceQueue, int maxPageSize, int limit) { this.maxPageSize = maxPageSize; this.blockFactory 
= blockFactory; this.remainingDocs = limit; @@ -123,27 +105,6 @@ static final class Impl extends SourceOperator { this.timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); this.tsHashesBuilder = blockFactory.newBytesRefVectorBuilder(Math.min(limit, maxPageSize)); this.sliceQueue = sliceQueue; - if (rounding != null) { - try { - long minTimestamp = Long.MAX_VALUE; - long maxTimestamp = Long.MIN_VALUE; - for (var slice : sliceQueue.getSlices()) { - for (var leaf : slice.leaves()) { - var pointValues = leaf.leafReaderContext().reader().getPointValues(DataStreamTimestampFieldMapper.DEFAULT_PATH); - long segmentMin = LongPoint.decodeDimension(pointValues.getMinPackedValue(), 0); - minTimestamp = Math.min(segmentMin, minTimestamp); - long segmentMax = LongPoint.decodeDimension(pointValues.getMaxPackedValue(), 0); - maxTimestamp = Math.max(segmentMax, maxTimestamp); - } - } - this.rounding = rounding.prepare(minTimestamp, maxTimestamp); - this.intervalsBuilder = blockFactory.newLongVectorBuilder(Math.min(limit, maxPageSize)); - } catch (IOException ioe) { - throw new UncheckedIOException(ioe); - } - } else { - this.rounding = null; - } } @Override @@ -172,7 +133,6 @@ public Page getOutput() { IntVector leaf = null; IntVector docs = null; LongVector timestamps = null; - LongVector intervals = null; BytesRefVector tsids = null; try { if (iterator == null) { @@ -201,20 +161,13 @@ public Page getOutput() { timestamps = timestampsBuilder.build(); timestampsBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); - if (rounding != null) { - intervals = intervalsBuilder.build(); - intervalsBuilder = blockFactory.newLongVectorBuilder(Math.min(remainingDocs, maxPageSize)); - } else { - intervals = blockFactory.newConstantLongVector(0, timestamps.getPositionCount()); - } tsids = tsHashesBuilder.build(); tsHashesBuilder = blockFactory.newBytesRefVectorBuilder(Math.min(remainingDocs, maxPageSize)); page = new Page( 
currentPagePos, new DocVector(shard.asVector(), leaf, docs, leaf.isConstant()).asBlock(), tsids.asBlock(), - timestamps.asBlock(), - intervals.asBlock() + timestamps.asBlock() ); currentPagePos = 0; @@ -225,7 +178,7 @@ public Page getOutput() { throw new UncheckedIOException(e); } finally { if (page == null) { - Releasables.closeExpectNoException(shard, leaf, docs, timestamps, tsids, intervals); + Releasables.closeExpectNoException(shard, leaf, docs, timestamps, tsids); } } return page; @@ -233,7 +186,7 @@ public Page getOutput() { @Override public void close() { - Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampsBuilder, intervalsBuilder, tsHashesBuilder); + Releasables.closeExpectNoException(docsBuilder, segmentsBuilder, timestampsBuilder, tsHashesBuilder); } class TimeSeriesIterator { @@ -289,9 +242,6 @@ void consume() throws IOException { segmentsBuilder.appendInt(leaf.segmentOrd); docsBuilder.appendInt(leaf.iterator.docID()); timestampsBuilder.appendLong(leaf.timestamp); - if (rounding != null) { - intervalsBuilder.appendLong(rounding.round(leaf.timestamp)); - } tsHashesBuilder.appendBytesRef(currentTsid); final Leaf newTop; if (leaf.nextDoc()) { @@ -318,9 +268,6 @@ void consume() throws IOException { while (leaf.nextDoc()) { tsHashesBuilder.appendBytesRef(leaf.timeSeriesHash); timestampsBuilder.appendLong(leaf.timestamp); - if (rounding != null) { - intervalsBuilder.appendLong(rounding.round(leaf.timestamp)); - } // Don't append segment ord, because there is only one segment. 
docsBuilder.appendInt(leaf.iterator.docID()); currentPagePos++; diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java index b5525b985be90..e5bb8e3138e25 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleAggregatorFunctionTests.java @@ -13,7 +13,9 @@ import org.elasticsearch.compute.operator.SequenceDoubleBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -37,7 +39,14 @@ protected String expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { - Object[] values = input.stream().flatMapToDouble(b -> allDoubles(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream() + .flatMapToDouble(AggregatorFunctionTestCase::allDoubles) + .boxed() + .collect(Collectors.toSet()) + .toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java index 4554a60b7a00c..a4b1a3c028e43 100644 --- 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesDoubleGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -53,7 +55,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java index 899a89dd993a4..67068ce10c997 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatAggregatorFunctionTests.java @@ -13,7 +13,9 @@ import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -37,7 +39,10 @@ protected String expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { + TreeSet 
set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); Object[] values = input.stream().flatMap(AggregatorFunctionTestCase::allFloats).collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java index 787b6fd4c75be..e25d7567a1933 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesFloatGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -49,7 +51,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java index 46e31b589997a..c60707046a0b1 
100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntAggregatorFunctionTests.java @@ -7,20 +7,20 @@ package org.elasticsearch.compute.aggregation; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.operator.SequenceIntBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsInAnyOrder; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/109932") public class ValuesIntAggregatorFunctionTests extends AggregatorFunctionTestCase { @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { @@ -39,7 +39,14 @@ protected String expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { - Object[] values = input.stream().flatMapToInt(b -> allInts(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream() + .flatMapToInt(AggregatorFunctionTestCase::allInts) + .boxed() + .collect(Collectors.toSet()) + .toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java index 831e2c1fdfd68..154b076d6a246 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesIntGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -49,7 +51,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java index e2a77bed4f4cd..4b01603b3768d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongAggregatorFunctionTests.java @@ -13,7 +13,9 @@ import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import 
java.util.stream.LongStream; @@ -37,7 +39,14 @@ protected String expectedDescriptionOfAggregator() { @Override public void assertSimpleOutput(List input, Block result) { - Object[] values = input.stream().flatMapToLong(b -> allLongs(b)).boxed().collect(Collectors.toSet()).toArray(Object[]::new); - assertThat((List) BlockUtils.toJavaObject(result, 0), containsInAnyOrder(values)); + TreeSet set = new TreeSet<>((List) BlockUtils.toJavaObject(result, 0)); + Object[] values = input.stream() + .flatMapToLong(AggregatorFunctionTestCase::allLongs) + .boxed() + .collect(Collectors.toSet()) + .toArray(Object[]::new); + if (false == set.containsAll(Arrays.asList(values))) { + assertThat(set, containsInAnyOrder(values)); + } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java index ab667b959c7ae..8259d84d955ef 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/ValuesLongGroupingAggregatorFunctionTests.java @@ -15,7 +15,9 @@ import org.elasticsearch.compute.operator.TupleBlockSourceOperator; import org.elasticsearch.core.Tuple; +import java.util.Arrays; import java.util.List; +import java.util.TreeSet; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -49,7 +51,12 @@ public void assertSimpleGroup(List input, Block result, int position, Long switch (values.length) { case 0 -> assertThat(resultValue, nullValue()); case 1 -> assertThat(resultValue, equalTo(values[0])); - default -> assertThat((List) resultValue, containsInAnyOrder(values)); + default -> { + TreeSet set = new TreeSet<>((List) resultValue); + if (false == set.containsAll(Arrays.asList(values))) { + 
assertThat(set, containsInAnyOrder(values)); + } + } } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java index 17d302f198bff..b126ca8af0e31 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/TimeSeriesSortedSourceOperatorTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.mapper.DataStreamTimestampFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; @@ -85,12 +84,12 @@ public void testSimple() { // for now we emit at most one time series each page int offset = 0; for (Page page : results) { - assertThat(page.getBlockCount(), equalTo(6)); + assertThat(page.getBlockCount(), equalTo(5)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector voltageVector = (LongVector) page.getBlock(4).asVector(); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(5).asVector(); + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { int expectedTsidOrd = offset / numSamplesPerTS; String expectedHostname = String.format(Locale.ROOT, "host-%02d", expectedTsidOrd); @@ -115,7 +114,7 @@ public void testLimit() { List results = runDriver(limit, 
randomIntBetween(1, 1024), randomBoolean(), numTimeSeries, numSamplesPerTS, timestampStart); assertThat(results, hasSize(1)); Page page = results.get(0); - assertThat(page.getBlockCount(), equalTo(6)); + assertThat(page.getBlockCount(), equalTo(5)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); assertThat(docVector.getPositionCount(), equalTo(limit)); @@ -126,10 +125,10 @@ public void testLimit() { LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); assertThat(timestampVector.getPositionCount(), equalTo(limit)); - LongVector voltageVector = (LongVector) page.getBlock(4).asVector(); + LongVector voltageVector = (LongVector) page.getBlock(3).asVector(); assertThat(voltageVector.getPositionCount(), equalTo(limit)); - BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(5).asVector(); + BytesRefVector hostnameVector = (BytesRefVector) page.getBlock(4).asVector(); assertThat(hostnameVector.getPositionCount(), equalTo(limit)); assertThat(docVector.shards().getInt(0), equalTo(0)); @@ -161,7 +160,6 @@ record Doc(int host, long timestamp, long metric) {} limit, maxPageSize, randomBoolean(), - TimeValue.ZERO, writer -> { Randomness.shuffle(docs); for (Doc doc : docs) { @@ -194,11 +192,11 @@ record Doc(int host, long timestamp, long metric) {} assertThat(page.getPositionCount(), lessThanOrEqualTo(limit)); assertThat(page.getPositionCount(), lessThanOrEqualTo(maxPageSize)); } - assertThat(page.getBlockCount(), equalTo(5)); + assertThat(page.getBlockCount(), equalTo(4)); DocVector docVector = (DocVector) page.getBlock(0).asVector(); BytesRefVector tsidVector = (BytesRefVector) page.getBlock(1).asVector(); LongVector timestampVector = (LongVector) page.getBlock(2).asVector(); - LongVector metricVector = (LongVector) page.getBlock(4).asVector(); + LongVector metricVector = (LongVector) page.getBlock(3).asVector(); for (int i = 0; i < page.getPositionCount(); i++) { Doc doc = docs.get(offset); offset++; @@ -242,7 +240,6 @@ public void 
testMatchNone() throws Exception { Integer.MAX_VALUE, randomIntBetween(1, 1024), 1, - TimeValue.ZERO, List.of(ctx), unused -> query ); @@ -264,7 +261,7 @@ public void testMatchNone() throws Exception { @Override protected Operator.OperatorFactory simple() { - return createTimeSeriesSourceOperator(directory, r -> this.reader = r, 1, 1, false, TimeValue.ZERO, writer -> { + return createTimeSeriesSourceOperator(directory, r -> this.reader = r, 1, 1, false, writer -> { long timestamp = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2024-01-01T00:00:00Z"); writeTS(writer, timestamp, new Object[] { "hostname", "host-01" }, new Object[] { "voltage", 2 }); return 1; @@ -289,7 +286,6 @@ List runDriver(int limit, int maxPageSize, boolean forceMerge, int numTime limit, maxPageSize, forceMerge, - TimeValue.ZERO, writer -> { long timestamp = timestampStart; for (int i = 0; i < numSamplesPerTS; i++) { @@ -333,7 +329,6 @@ public static TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperat int limit, int maxPageSize, boolean forceMerge, - TimeValue timeValue, CheckedFunction indexingLogic ) { Sort sort = new Sort( @@ -361,7 +356,7 @@ public static TimeSeriesSortedSourceOperatorFactory createTimeSeriesSourceOperat } var ctx = new LuceneSourceOperatorTests.MockShardContext(reader, 0); Function queryFunction = c -> new MatchAllDocsQuery(); - return TimeSeriesSortedSourceOperatorFactory.create(limit, maxPageSize, 1, timeValue, List.of(ctx), queryFunction); + return TimeSeriesSortedSourceOperatorFactory.create(limit, maxPageSize, 1, List.of(ctx), queryFunction); } public static void writeTS(RandomIndexWriter iw, long timestamp, Object[] dimensions, Object[] metrics) throws IOException { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java index 573c960e86b9c..da1a9c9408f90 
100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TimeSeriesAggregationOperatorTests.java @@ -11,13 +11,17 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.compute.aggregation.RateLongAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.SumDoubleAggregatorFunctionSupplier; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; +import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.ElementType; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperatorTests; import org.elasticsearch.core.IOUtils; @@ -27,6 +31,7 @@ import org.junit.After; import java.io.IOException; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; import java.util.stream.IntStream; @@ -203,7 +208,6 @@ record Doc(String pod, String cluster, long timestamp, long requests) { Integer.MAX_VALUE, between(1, 100), randomBoolean(), - bucketInterval, writer -> { List docs = new ArrayList<>(); for (Pod pod : pods) { @@ -227,15 +231,35 @@ record Doc(String pod, String cluster, long timestamp, long requests) { ); var ctx = driverContext(); - List extractOperators = new ArrayList<>(); + List intermediateOperators = new ArrayList<>(); + final Rounding.Prepared rounding = new Rounding.Builder(bucketInterval).timeZone(ZoneOffset.UTC).build().prepareForUnknown(); + var timeBucket = new EvalOperator(ctx.blockFactory(), 
new EvalOperator.ExpressionEvaluator() { + @Override + public Block eval(Page page) { + LongBlock timestampsBlock = page.getBlock(2); + LongVector timestamps = timestampsBlock.asVector(); + try (var builder = blockFactory().newLongVectorFixedBuilder(timestamps.getPositionCount())) { + for (int i = 0; i < timestamps.getPositionCount(); i++) { + builder.appendLong(rounding.round(timestampsBlock.getLong(i))); + } + return builder.build().asBlock(); + } + } + + @Override + public void close() { + + } + }); + intermediateOperators.add(timeBucket); var rateField = new NumberFieldMapper.NumberFieldType("requests", NumberFieldMapper.NumberType.LONG); Operator extractRate = (ValuesSourceReaderOperatorTests.factory(reader, rateField, ElementType.LONG).get(ctx)); - extractOperators.add(extractRate); + intermediateOperators.add(extractRate); List nonBucketGroupings = new ArrayList<>(groupings); nonBucketGroupings.remove("bucket"); for (String grouping : nonBucketGroupings) { var groupingField = new KeywordFieldMapper.KeywordFieldType(grouping); - extractOperators.add(ValuesSourceReaderOperatorTests.factory(reader, groupingField, ElementType.BYTES_REF).get(ctx)); + intermediateOperators.add(ValuesSourceReaderOperatorTests.factory(reader, groupingField, ElementType.BYTES_REF).get(ctx)); } // _doc, tsid, timestamp, bucket, requests, grouping1, grouping2 Operator intialAgg = new TimeSeriesAggregationOperatorFactories.Initial( @@ -278,7 +302,7 @@ record Doc(String pod, String cluster, long timestamp, long requests) { new Driver( ctx, sourceOperatorFactory.get(ctx), - CollectionUtils.concatLists(extractOperators, List.of(intialAgg, intermediateAgg, finalAgg)), + CollectionUtils.concatLists(intermediateOperators, List.of(intialAgg, intermediateAgg, finalAgg)), new TestResultPageSinkOperator(results::add), () -> {} ) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index ebfcdacd7587a..9b759a49eab4e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -14,6 +14,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Locale; import java.util.Set; /** @@ -22,83 +23,90 @@ * {@link RestNodesCapabilitiesAction} and we use them to enable tests. */ public class EsqlCapabilities { - /** - * Support for function {@code CBRT}. Done in #108574. - */ - private static final String FN_CBRT = "fn_cbrt"; + public enum Cap { + /** + * Support for function {@code CBRT}. Done in #108574. + */ + FN_CBRT, - /** - * Support for {@code MV_APPEND} function. #107001 - */ - private static final String FN_MV_APPEND = "fn_mv_append"; + /** + * Support for {@code MV_APPEND} function. #107001 + */ + FN_MV_APPEND, - /** - * Support for function {@code IP_PREFIX}. - */ - private static final String FN_IP_PREFIX = "fn_ip_prefix"; + /** + * Support for function {@code IP_PREFIX}. + */ + FN_IP_PREFIX, - /** - * Fix on function {@code SUBSTRING} that makes it not return null on empty strings. - */ - private static final String FN_SUBSTRING_EMPTY_NULL = "fn_substring_empty_null"; + /** + * Fix on function {@code SUBSTRING} that makes it not return null on empty strings. + */ + FN_SUBSTRING_EMPTY_NULL, - /** - * Support for aggregation function {@code TOP_LIST}. - */ - private static final String AGG_TOP_LIST = "agg_top_list"; + /** + * Support for aggregation function {@code TOP_LIST}. + */ + AGG_TOP_LIST, - /** - * Optimization for ST_CENTROID changed some results in cartesian data. #108713 - */ - private static final String ST_CENTROID_AGG_OPTIMIZED = "st_centroid_agg_optimized"; + /** + * Optimization for ST_CENTROID changed some results in cartesian data. 
#108713 + */ + ST_CENTROID_AGG_OPTIMIZED, - /** - * Support for requesting the "_ignored" metadata field. - */ - private static final String METADATA_IGNORED_FIELD = "metadata_field_ignored"; + /** + * Support for requesting the "_ignored" metadata field. + */ + METADATA_IGNORED_FIELD, - /** - * Support for the syntax {@code "tables": {"type": []}}. - */ - private static final String TABLES_TYPES = "tables_types"; + /** + * Support for the syntax {@code "tables": {"type": []}}. + */ + TABLES_TYPES(true), - /** - * Support for requesting the "REPEAT" command. - */ - private static final String REPEAT = "repeat"; + /** + * Support for requesting the "REPEAT" command. + */ + REPEAT, - /** - * Cast string literals to datetime in addition and subtraction when the other side is a date or time interval. - */ - public static final String STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB = "string_literal_auto_casting_to_datetime_add_sub"; + /** + * Cast string literals to datetime in addition and subtraction when the other side is a date or time interval. + */ + STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB, - /** - * Support multiple field mappings if appropriate conversion function is used (union types) - */ - public static final String UNION_TYPES = "union_types"; + /** + * Support for named or positional parameters in EsqlQueryRequest. + */ + NAMED_POSITIONAL_PARAMETER, - /** - * Support for named or positional parameters in EsqlQueryRequest. 
- */ - private static final String NAMED_POSITIONAL_PARAMETER = "named_positional_parameter"; + /** + * Support multiple field mappings if appropriate conversion function is used (union types) + */ + UNION_TYPES; + + Cap() { + snapshotOnly = false; + }; + + Cap(boolean snapshotOnly) { + this.snapshotOnly = snapshotOnly; + }; + + public String capabilityName() { + return name().toLowerCase(Locale.ROOT); + } + + private final boolean snapshotOnly; + } public static final Set CAPABILITIES = capabilities(); private static Set capabilities() { List caps = new ArrayList<>(); - caps.add(FN_CBRT); - caps.add(FN_IP_PREFIX); - caps.add(FN_SUBSTRING_EMPTY_NULL); - caps.add(AGG_TOP_LIST); - caps.add(ST_CENTROID_AGG_OPTIMIZED); - caps.add(METADATA_IGNORED_FIELD); - caps.add(FN_MV_APPEND); - caps.add(REPEAT); - caps.add(UNION_TYPES); - caps.add(NAMED_POSITIONAL_PARAMETER); - - if (Build.current().isSnapshot()) { - caps.add(TABLES_TYPES); + for (Cap cap : Cap.values()) { + if (Build.current().isSnapshot() || cap.snapshotOnly == false) { + caps.add(cap.capabilityName()); + } } /* @@ -110,7 +118,6 @@ private static Set capabilities() { for (NodeFeature feature : new EsqlFeatures().getHistoricalFeatures().keySet()) { caps.add(cap(feature)); } - caps.add(STRING_LITERAL_AUTO_CASTING_TO_DATETIME_ADD_SUB); return Set.copyOf(caps); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java index fc43f1002d112..13773ca61f8d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EsQueryExec.java @@ -31,7 +31,6 @@ public class EsQueryExec extends LeafExec implements EstimatesRowSize { static final EsField DOC_ID_FIELD = new EsField("_doc", DataType.DOC_DATA_TYPE, Map.of(), false); static final EsField TSID_FIELD = new 
EsField("_tsid", DataType.TSID_DATA_TYPE, Map.of(), true); static final EsField TIMESTAMP_FIELD = new EsField("@timestamp", DataType.DATETIME, Map.of(), true); - static final EsField INTERVAL_FIELD = new EsField("@timestamp_interval", DataType.DATETIME, Map.of(), true); private final EsIndex index; private final IndexMode indexMode; @@ -86,8 +85,7 @@ private static List sourceAttributes(Source source, IndexMode indexMo case TIME_SERIES -> List.of( new FieldAttribute(source, DOC_ID_FIELD.getName(), DOC_ID_FIELD), new FieldAttribute(source, TSID_FIELD.getName(), TSID_FIELD), - new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TIMESTAMP_FIELD), - new FieldAttribute(source, INTERVAL_FIELD.getName(), INTERVAL_FIELD) + new FieldAttribute(source, TIMESTAMP_FIELD.getName(), TIMESTAMP_FIELD) ); }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java index fdba785f668d7..825057c20d0e0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java @@ -32,7 +32,6 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.SourceOperator; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.mapper.BlockLoader; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; @@ -184,7 +183,6 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, limit, context.pageSize(rowEstimatedSize), context.queryPragmas().taskConcurrency(), - TimeValue.ZERO, shardContexts, querySupplier(esQueryExec.query()) ); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index 960f341857b1f..fd161c8d63871 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -229,7 +229,10 @@ public final void test() throws Throwable { assumeFalse("metadata fields aren't supported", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METADATA_FIELDS))); assumeFalse("enrich can't load fields in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.ENRICH_LOAD))); assumeFalse("can't load metrics in csv tests", testCase.requiredCapabilities.contains(cap(EsqlFeatures.METRICS_SYNTAX))); - assumeFalse("multiple indices aren't supported", testCase.requiredCapabilities.contains(EsqlCapabilities.UNION_TYPES)); + assumeFalse( + "multiple indices aren't supported", + testCase.requiredCapabilities.contains(EsqlCapabilities.Cap.UNION_TYPES.capabilityName()) + ); if (Build.current().isSnapshot()) { assertThat( diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index f27438de6df6b..249d4f7349517 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -161,6 +161,24 @@ protected static Iterable parameterSuppliersFromTypedData(List + * Use if possible, as this method may get updated with new checks in the future. + *

+ * + * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)} + */ + protected static Iterable parameterSuppliersFromTypedDataWithDefaultChecks( + boolean entirelyNullPreservesType, + List suppliers + ) { + return parameterSuppliersFromTypedData( + errorsForCasesWithoutExamples(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers))) + ); + } + /** * Build an {@link Attribute} that loads a field. */ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java index 2096d9cec75b1..d97f070275617 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/FromBase64Tests.java @@ -55,7 +55,7 @@ public static Iterable parameters() { ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java index dd35e04708c9f..4c9175e4906bf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBase64Tests.java @@ -55,7 +55,7 @@ public static Iterable parameters() { ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return 
parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java index 3a25ad6b56d0c..c4e53d922ac60 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanTests.java @@ -80,7 +80,7 @@ public static Iterable parameters() { emptyList() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java index b520e559c45d7..1c1431fe3b7ea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java @@ -72,7 +72,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java index 9eb1155a209a1..48a610804845d 
100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianShapeTests.java @@ -73,7 +73,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 0ef931710422e..6aef91be43088 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -162,7 +162,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String randomDateString(long from, long to) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java index b7cb03879fd6f..fc45c8b26a869 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java @@ -89,7 +89,7 @@ public static Iterable parameters() { ) ); - return 
parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java index 6438a8422a664..5f45cc11d9c5a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleTests.java @@ -139,7 +139,7 @@ public static Iterable parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java index e1af4441b3c5f..2b5dc453acc23 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java index 291708e94888c..bca8dc822052f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoShapeTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { List.of() ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java index 415d9ea0a4a70..20b48d24f8211 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPTests.java @@ -63,7 +63,7 @@ public static Iterable parameters() { ); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java index 83bdaf2f2d304..45837c2110ff3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerTests.java @@ -268,7 +268,7 @@ public static Iterable parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java index 92b0bb192e2aa..565562b8574d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongTests.java @@ -227,7 +227,7 @@ public static Iterable parameters() { l -> ((Integer) l).longValue(), List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java index 67951b46d03b5..3f6e28c65142f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansTests.java @@ -70,7 +70,7 @@ public static Iterable parameters() { List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } 
@Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java index 511df557ff842..0556742b55b3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringTests.java @@ -130,7 +130,7 @@ public static Iterable parameters() { v -> new BytesRef(v.toString()), List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java index 4182f99d316fc..44092db499d2d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongTests.java @@ -244,7 +244,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java index a397de64aeea8..34281442872a5 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionTests.java @@ -60,7 +60,7 @@ public static Iterable parameters() { ); } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index 221f3fd51a545..bce3b7efebbb6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -39,54 +39,50 @@ public DateExtractTests(@Name("TestCase") Supplier te @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.KEYWORD, "chrono"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - equalTo(2023L) - ) + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.KEYWORD, "chrono"), + new 
TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") ), - new TestCaseSupplier( - List.of(DataType.TEXT, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.TEXT, "chrono"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - equalTo(2023L) - ) + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + equalTo(2023L) + ) + ), + new TestCaseSupplier( + List.of(DataType.TEXT, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("YeAr"), DataType.TEXT, "chrono"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "date") ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("not a unit"), DataType.KEYWORD, "chrono"), - new TestCaseSupplier.TypedData(0L, DataType.DATETIME, "date") + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + equalTo(2023L) + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("not a unit"), DataType.KEYWORD, "chrono"), + new TestCaseSupplier.TypedData(0L, DataType.DATETIME, "date") - ), - "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", - DataType.LONG, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: " - + "No enum constant java.time.temporal.ChronoField.NOT A UNIT" - ) - .withFoldingException(InvalidArgumentException.class, "invalid date field for []: not a unit") + ), + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", + DataType.LONG, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: " + + "No enum constant java.time.temporal.ChronoField.NOT A UNIT" ) - ) + .withFoldingException(InvalidArgumentException.class, "invalid date field for []: not a unit") ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java index 6e1b5caa710e1..b18748187709a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormatTests.java @@ -31,35 +31,31 @@ public DateFormatTests(@Name("TestCase") Supplier tes @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.KEYWORD, "formatter"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") - ), - "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", - DataType.KEYWORD, - equalTo(BytesRefs.toBytesRef("2023")) - ) + return 
parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.KEYWORD, "formatter"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") ), - new TestCaseSupplier( - List.of(DataType.TEXT, DataType.DATETIME), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.TEXT, "formatter"), - new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") - ), - "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", - DataType.KEYWORD, - equalTo(BytesRefs.toBytesRef("2023")) - ) - ) + "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", + DataType.KEYWORD, + equalTo(BytesRefs.toBytesRef("2023")) + ) + ), + new TestCaseSupplier( + List.of(DataType.TEXT, DataType.DATETIME), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy"), DataType.TEXT, "formatter"), + new TestCaseSupplier.TypedData(1687944333000L, DataType.DATETIME, "val") + ), + "DateFormatEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], locale=en_US]", + DataType.KEYWORD, + equalTo(BytesRefs.toBytesRef("2023")) ) ) ) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 8906994c6d7eb..161b338cc85b2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -35,102 +35,97 @@ public 
DateParseTests(@Name("TestCase") Supplier test @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, - List.of( - new TestCaseSupplier( - "Basic Case", - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of( + new TestCaseSupplier( + "Basic Case", + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - new TestCaseSupplier( - "With Text", - List.of(DataType.KEYWORD, DataType.TEXT), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With Text", + List.of(DataType.KEYWORD, DataType.TEXT), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - new 
TestCaseSupplier( - "With Both Text", - List.of(DataType.TEXT, DataType.TEXT), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With Both Text", + List.of(DataType.TEXT, DataType.TEXT), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.TEXT, "second") ), - new TestCaseSupplier( - "With keyword", - List.of(DataType.TEXT, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - equalTo(1683244800000L) - ) + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + "With keyword", + List.of(DataType.TEXT, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.TEXT, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("not 
a format"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + equalTo(1683244800000L) + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("not a format"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: Invalid format: " - + "[not a format]: Unknown pattern letter: o" - ) - .withFoldingException( - InvalidArgumentException.class, - "invalid date pattern for []: Invalid format: [not a format]: Unknown pattern letter: o" - ) ), - new TestCaseSupplier( - List.of(DataType.KEYWORD, DataType.KEYWORD), - () -> new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("not a date"), DataType.KEYWORD, "second") + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: Invalid format: " + "[not a format]: Unknown pattern letter: o" + ) + .withFoldingException( + InvalidArgumentException.class, + "invalid date pattern for []: Invalid format: [not a format]: Unknown pattern letter: o" + ) + ), + new TestCaseSupplier( + List.of(DataType.KEYWORD, DataType.KEYWORD), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataType.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("not a date"), DataType.KEYWORD, "second") - ), - "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", - DataType.DATETIME, - is(nullValue()) - ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.") - .withWarning( - "Line -1:-1: java.lang.IllegalArgumentException: " - + "failed to parse date field [not a date] with format [yyyy-MM-dd]" - ) + ), + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", + DataType.DATETIME, + is(nullValue()) + ).withWarning("Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.") + .withWarning( + "Line -1:-1: java.lang.IllegalArgumentException: " + + "failed to parse date field [not a date] with format [yyyy-MM-dd]" ) - ) ) ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java index b627d7cd88908..4c5a7d3734ce3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTruncTests.java @@ -54,7 +54,7 @@ public static Iterable parameters() { ofDuration(Duration.ofSeconds(30), ts, "2023-02-17T10:25:30.00Z"), randomSecond() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } public void testCreateRoundingDuration() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java index 2aaca179b2bc4..0d8f4bc7ea115 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatchTests.java @@ -84,7 +84,7 @@ public static Iterable parameters() { ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java index d2b5e0a455229..a575eb48c4bd7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/IpPrefixTests.java @@ -106,7 +106,7 @@ public static Iterable parameters() { }) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, randomizeBytesRefsOffset(suppliers)))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java index 63642a01fa117..7bd195ab86389 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java @@ -63,7 +63,7 @@ public static Iterable parameters() { equalTo(Math.abs(arg)) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public AbsTests(@Name("TestCase") Supplier testCaseSupplier) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java index 2266494391262..3b81316da5676 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java index 8c7000940390b..c92c626a5601b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java index 735113c34ca1b..ff61ecfa39687 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java @@ -66,7 +66,7 @@ public static Iterable parameters() { UNSIGNED_LONG_MAX, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java index 981c6812d1176..61e7a1f051905 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java index cb666f03494e5..1ea63cc006e9c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java @@ -61,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java index 62c23369cc436..f0c990ec64af1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java @@ -50,7 +50,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java index ce23598bf980d..0d9bd6bcae64a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java index 5d349e09aed2e..8f78e8ee67106 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java @@ -61,7 +61,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java index c138fc12881fd..86c59a7a06cf4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java index 585e75d05e378..1f4fef4ab15c8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java @@ -33,7 +33,7 @@ public static Iterable parameters() { Double.POSITIVE_INFINITY, List.of() ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java index bc1a64da1cc73..f95747618dd28 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAppendTests.java @@ -13,18 +13,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.geo.GeometryTestUtils; import org.elasticsearch.geo.ShapeTestUtils; -import org.elasticsearch.geometry.Circle; -import org.elasticsearch.geometry.Geometry; -import org.elasticsearch.geometry.GeometryCollection; -import org.elasticsearch.geometry.GeometryVisitor; -import org.elasticsearch.geometry.Line; -import org.elasticsearch.geometry.LinearRing; -import org.elasticsearch.geometry.MultiLine; -import org.elasticsearch.geometry.MultiPoint; -import org.elasticsearch.geometry.MultiPolygon; -import org.elasticsearch.geometry.Point; -import org.elasticsearch.geometry.Polygon; -import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -247,25 +235,8 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.GEO_SHAPE, DataType.GEO_SHAPE), () -> { - GeometryPointCountVisitor pointCounter = new GeometryPointCountVisitor(); - List field1 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); - List field2 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field1 = randomList(1, 3, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); + var field2 = randomList(1, 3, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); var result = new ArrayList<>(field1); result.addAll(field2); return new TestCaseSupplier.TestCase( @@ 
-280,25 +251,8 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.CARTESIAN_SHAPE, DataType.CARTESIAN_SHAPE), () -> { - GeometryPointCountVisitor pointCounter = new GeometryPointCountVisitor(); - List field1 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> ShapeTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); - List field2 = randomList( - 1, - 3, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> ShapeTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field1 = randomList(1, 3, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean(), 500)))); + var field2 = randomList(1, 3, () -> new BytesRef(CARTESIAN.asWkt(ShapeTestUtils.randomGeometry(randomBoolean(), 500)))); var result = new ArrayList<>(field1); result.addAll(field2); return new TestCaseSupplier.TestCase( @@ -339,65 +293,4 @@ private static void nulls(List suppliers) { ); })); } - - public static class GeometryPointCountVisitor implements GeometryVisitor { - - @Override - public Integer visit(Circle circle) throws RuntimeException { - return 2; - } - - @Override - public Integer visit(GeometryCollection collection) throws RuntimeException { - int size = 0; - for (Geometry geometry : collection) { - size += geometry.visit(this); - } - return size; - } - - @Override - public Integer visit(Line line) throws RuntimeException { - return line.length(); - } - - @Override - public Integer visit(LinearRing ring) throws RuntimeException { - return ring.length(); - } - - @Override - public Integer visit(MultiLine multiLine) throws RuntimeException { - return visit((GeometryCollection) multiLine); - } - - @Override - public Integer visit(MultiPoint multiPoint) throws RuntimeException { - return multiPoint.size(); - } - - @Override - public Integer visit(MultiPolygon multiPolygon) throws 
RuntimeException { - return visit((GeometryCollection) multiPolygon); - } - - @Override - public Integer visit(Point point) throws RuntimeException { - return 1; - } - - @Override - public Integer visit(Polygon polygon) throws RuntimeException { - int size = polygon.getPolygon().length(); - for (int i = 0; i < polygon.getNumberOfHoles(); i++) { - size += polygon.getHole(i).length(); - } - return size; - } - - @Override - public Integer visit(Rectangle rectangle) throws RuntimeException { - return 4; - } - } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java index 966a5a590e256..43c683040eac4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java @@ -55,7 +55,7 @@ public static Iterable parameters() { */ (size, data) -> avg.apply(size, data.mapToDouble(v -> unsignedLongToDouble(NumericUtils.asLongUnsigned(v)))) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java index 39ef5eefe9287..ba4ddb1be84cc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java @@ -68,7 +68,7 @@ public static Iterable 
parameters() { } } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java index 8733dc0d25c40..8c8772f8ed4e2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java @@ -40,7 +40,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); geoShape(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); cartesianShape(cases, "mv_count", "MvCount", DataType.INTEGER, (size, values) -> equalTo(Math.toIntExact(values.count()))); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java index 1c24b1a8aae64..6e143d9175f41 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvFirstTests.java @@ -41,7 +41,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_first", 
"MvFirst", DataType.CARTESIAN_POINT, (size, values) -> equalTo(values.findFirst().get())); geoShape(cases, "mv_first", "MvFirst", DataType.GEO_SHAPE, (size, values) -> equalTo(values.findFirst().get())); cartesianShape(cases, "mv_first", "MvFirst", DataType.CARTESIAN_SHAPE, (size, values) -> equalTo(values.findFirst().get())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java index 1b6fb482ea3d0..83d94f2cc9884 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvLastTests.java @@ -41,7 +41,7 @@ public static Iterable parameters() { cartesianPoints(cases, "mv_last", "MvLast", DataType.CARTESIAN_POINT, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); geoShape(cases, "mv_last", "MvLast", DataType.GEO_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); cartesianShape(cases, "mv_last", "MvLast", DataType.CARTESIAN_SHAPE, (size, values) -> equalTo(values.reduce((f, s) -> s).get())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java index 5af662c2642cc..63530234e53fa 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java @@ -38,7 +38,7 @@ public static Iterable parameters() { longs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); unsignedLongs(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.reduce(BigInteger::max).get())); dateTimes(cases, "mv_max", "MvMax", (size, values) -> equalTo(values.max().getAsLong())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java index 4c324c916f861..f44f5d44e3f62 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java @@ -92,7 +92,7 @@ public static Iterable parameters() { ) ) ); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java index 6f398c8a7ac92..5be67548f784e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java @@ -38,7 +38,7 @@ public static Iterable parameters() { longs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); unsignedLongs(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.reduce(BigInteger::min).get())); dateTimes(cases, "mv_min", "MvMin", (size, values) -> equalTo(values.min().getAsLong())); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, cases); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java index 0550be25f9d91..3f6fb841f006f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -306,16 +306,7 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new TestCaseSupplier(List.of(DataType.GEO_SHAPE, DataType.INTEGER, DataType.INTEGER), () -> { - var pointCounter = new MvAppendTests.GeometryPointCountVisitor(); - List field = randomList( - 1, - 5, - () -> new BytesRef( - GEO.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field = randomList(1, 5, () -> new BytesRef(GEO.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); int length = field.size(); int start = randomIntBetween(0, length - 1); int end = randomIntBetween(start, length - 1); @@ -332,16 +323,7 @@ private static void bytesRefs(List suppliers) { })); suppliers.add(new 
TestCaseSupplier(List.of(DataType.CARTESIAN_SHAPE, DataType.INTEGER, DataType.INTEGER), () -> { - var pointCounter = new MvAppendTests.GeometryPointCountVisitor(); - List field = randomList( - 1, - 5, - () -> new BytesRef( - CARTESIAN.asWkt( - randomValueOtherThanMany(g -> g.visit(pointCounter) > 500, () -> GeometryTestUtils.randomGeometry(randomBoolean())) - ) - ) - ); + var field = randomList(1, 5, () -> new BytesRef(CARTESIAN.asWkt(GeometryTestUtils.randomGeometry(randomBoolean(), 500)))); int length = field.size(); int start = randomIntBetween(0, length - 1); int end = randomIntBetween(start, length - 1); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java index b466ffe1e92f1..fa0fc8465ce7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StXTests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { final List suppliers = new ArrayList<>(); TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StXTests::valueOf, List.of()); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static double valueOf(BytesRef wkb) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java index 1f3639bf1ecb4..15f34271be779 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/StYTests.java @@ -36,7 +36,7 @@ public static Iterable parameters() { final List suppliers = new ArrayList<>(); TestCaseSupplier.forUnaryGeoPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); TestCaseSupplier.forUnaryCartesianPoint(suppliers, expectedEvaluator, DOUBLE, StYTests::valueOf, List.of()); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static double valueOf(BytesRef wkb) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java index f44a51b0e53bb..27e3fc8684efc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java @@ -67,7 +67,7 @@ static Iterable parameters(String name, boolean trimLeading, boolean t })); } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } private static TestCaseSupplier.TestCase testCase(String name, DataType type, String data, String expected) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java index e6a5d30d0fa53..7d6e3439c8063 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java @@ -167,7 +167,7 @@ public static Iterable parameters() { ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String unicodeLeftSubstring(String str, int length) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java index 81fcc118ade05..4a7e6b3a0996d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java @@ -49,7 +49,7 @@ public static Iterable parameters() { cases.addAll(makeTestCases("6 bytes, 2 code points", () -> "❗️", 2)); cases.addAll(makeTestCases("100 random alpha", () -> randomAlphaOfLength(100), 100)); cases.addAll(makeTestCases("100 random code points", () -> randomUnicodeOfCodepointLength(100), 100)); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, cases))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, cases); } private static List makeTestCases(String title, Supplier text, int expectedLength) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java index bfadf66f7f5cc..82581b69f8713 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ReplaceTests.java @@ -103,7 +103,7 @@ public static Iterable parameters() { "Unclosed character class near index 0\n[\n^".replaceAll("\n", System.lineSeparator()) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } private static TestCaseSupplier fixedCase(String name, String str, String oldStr, String newStr, String result) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java index 599ab51995217..9d2b55e02fff7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java @@ -166,7 +166,7 @@ public static Iterable parameters() { equalTo(new BytesRef(unicodeRightSubstring(text, length))) ); })); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } private static String unicodeRightSubstring(String str, int length) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index 47e48df90007e..bf2dd0359a352 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -64,7 +64,7 @@ public static Iterable parameters() { })); } } - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(true, suppliers); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index c1a49455d9d83..0ee60cfc77d2f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -36,61 +36,56 @@ public SubstringTests(@Name("TestCase") Supplier test @ParametersFactory public static Iterable parameters() { - return parameterSuppliersFromTypedData( - errorsForCasesWithoutExamples( - anyNullIsNull( - true, + return parameterSuppliersFromTypedDataWithDefaultChecks( + true, + List.of(new TestCaseSupplier("Substring basic test", List.of(DataType.KEYWORD, DataType.INTEGER, DataType.INTEGER), () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + String text = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier("Substring basic test", List.of(DataType.KEYWORD, DataType.INTEGER, DataType.INTEGER), () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - String text = randomAlphaOfLength(10); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - 
"SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) - ); - }), - new TestCaseSupplier( - "Substring basic test with text input", - List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), - () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - String text = randomAlphaOfLength(10); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) - ); - } + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.KEYWORD, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") + ), + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) + ); + }), + new TestCaseSupplier( + "Substring basic test with text input", + List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), + () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + String text = randomAlphaOfLength(10); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataType.TEXT, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") + ), + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + 
equalTo(new BytesRef(text.substring(start - 1, start + length - 1))) + ); + } + ), + new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> { + int start = between(1, 8); + int length = between(1, 10 - start); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(""), DataType.TEXT, "str"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") ), - new TestCaseSupplier("Substring empty string", List.of(DataType.TEXT, DataType.INTEGER, DataType.INTEGER), () -> { - int start = between(1, 8); - int length = between(1, 10 - start); - return new TestCaseSupplier.TestCase( - List.of( - new TestCaseSupplier.TypedData(new BytesRef(""), DataType.TEXT, "str"), - new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), - new TestCaseSupplier.TypedData(length, DataType.INTEGER, "end") - ), - "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", - DataType.KEYWORD, - equalTo(new BytesRef("")) - ); - }) - ) - ) + "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", + DataType.KEYWORD, + equalTo(new BytesRef("")) + ); + }) ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java index 99b2b38aa8611..abb419e1e4a81 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToLowerTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 
10))); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java index 7b8e6abcdb3db..f101cacd73dc5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ToUpperTests.java @@ -47,7 +47,7 @@ public static Iterable parameters() { suppliers.add(supplier("text unicode", DataType.TEXT, () -> randomUnicodeOfLengthBetween(1, 10))); // add null as parameter - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } public void testRandomLocale() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java index c2a9766c23cbe..a628416ecc4b7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java @@ -110,7 +110,7 @@ public static Iterable parameters() { equalTo(arg.negated()) ); }))); - return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); + return parameterSuppliersFromTypedDataWithDefaultChecks(false, suppliers); } @Override diff 
--git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java index 54b9cdca98393..93d5587153181 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/LifecycleOperationSnapshotTests.java @@ -58,7 +58,10 @@ protected Settings nodeSettings() { } public void testModeSnapshotRestore() throws Exception { - clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", "repo").build()).get(); + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") + .setType("fs") + .setSettings(Settings.builder().put("location", "repo").build()) + .get(); client().execute( PutSnapshotLifecycleAction.INSTANCE, @@ -104,7 +107,7 @@ public void testModeSnapshotRestore() throws Exception { try { GetSnapshotsResponse getResp = client().execute( TransportGetSnapshotsAction.TYPE, - new GetSnapshotsRequest(new String[] { "repo" }, new String[] { snapshotName }) + new GetSnapshotsRequest(TEST_REQUEST_TIMEOUT, new String[] { "repo" }, new String[] { snapshotName }) ).get(); assertThat(getResp.getSnapshots().size(), equalTo(1)); assertThat(getResp.getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); @@ -121,7 +124,9 @@ public void testModeSnapshotRestore() throws Exception { // Restore snapshot client().execute( TransportRestoreSnapshotAction.TYPE, - new RestoreSnapshotRequest("repo", snapshotName).includeGlobalState(true).indices(Strings.EMPTY_ARRAY).waitForCompletion(true) + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, "repo", snapshotName).includeGlobalState(true) + .indices(Strings.EMPTY_ARRAY) + .waitForCompletion(true) ).get(); assertBusy(() -> assertThat(ilmMode(), equalTo(OperationMode.STOPPED))); diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java index 58603526a9c56..a352116278e7a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/Utils.java @@ -30,6 +30,7 @@ import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -159,4 +160,5 @@ public static SimilarityMeasure randomSimilarityMeasure() { return randomFrom(SimilarityMeasure.values()); } + public record PersistedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 709cc4d3494fd..d26b02ddba62b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -60,6 +60,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -1068,28 +1069,18 @@ private Map getRequestConfigMap( ); } - private record PeristedConfigRecord(Map config, Map secrets) {} - - private PeristedConfigRecord getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map 
serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfigRecord( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfigRecord getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PeristedConfigRecord( - new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } - private static Map getEmbeddingsServiceSettingsMap( String target, String provider, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java index bc541bbcf5369..d48068d5a4008 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/completion/AzureAiStudioChatCompletionTaskSettingsTests.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.completion; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import 
org.hamcrest.MatcherAssert; import java.io.IOException; @@ -27,7 +29,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -public class AzureAiStudioChatCompletionTaskSettingsTests extends ESTestCase { +public class AzureAiStudioChatCompletionTaskSettingsTests extends AbstractBWCWireSerializationTestCase< + AzureAiStudioChatCompletionTaskSettings> { public void testFromMap_AllValues() { var taskMap = getTaskSettingsMap(1.0, 2.0, true, 512); @@ -183,4 +186,36 @@ public static Map getTaskSettingsMap( return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioChatCompletionTaskSettings::new; + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings mutateInstance(AzureAiStudioChatCompletionTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioChatCompletionTaskSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioChatCompletionTaskSettings mutateInstanceForVersion( + AzureAiStudioChatCompletionTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioChatCompletionTaskSettings createRandom() { + return new AzureAiStudioChatCompletionTaskSettings( + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(new Double[] { null, randomDouble() }), + randomFrom(randomFrom(new Boolean[] { null, randomBoolean() })), + randomFrom(new Integer[] { null, randomNonNegativeInt() }) + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java index a592dd6e1f956..05388192b2f14 100644 
--- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsServiceSettingsTests.java @@ -7,20 +7,23 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.embeddings; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; import org.elasticsearch.inference.SimilarityMeasure; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.ConfigurationParseContext; import org.elasticsearch.xpack.inference.services.ServiceFields; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioEndpointType; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioProvider; import org.elasticsearch.xpack.inference.services.settings.RateLimitSettings; +import org.elasticsearch.xpack.inference.services.settings.RateLimitSettingsTests; import org.hamcrest.CoreMatchers; import org.hamcrest.MatcherAssert; @@ -32,7 +35,8 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -public class AzureAiStudioEmbeddingsServiceSettingsTests extends ESTestCase { +public class AzureAiStudioEmbeddingsServiceSettingsTests extends AbstractBWCWireSerializationTestCase< + AzureAiStudioEmbeddingsServiceSettings> { public void 
testFromMap_Request_CreatesSettingsCorrectly() { var target = "http://sometarget.local"; @@ -336,4 +340,40 @@ public static HashMap createRequestSettingsMap( return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioEmbeddingsServiceSettings::new; + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings mutateInstance(AzureAiStudioEmbeddingsServiceSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioEmbeddingsServiceSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioEmbeddingsServiceSettings mutateInstanceForVersion( + AzureAiStudioEmbeddingsServiceSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioEmbeddingsServiceSettings createRandom() { + return new AzureAiStudioEmbeddingsServiceSettings( + randomAlphaOfLength(10), + randomFrom(AzureAiStudioProvider.values()), + randomFrom(AzureAiStudioEndpointType.values()), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomBoolean(), + randomFrom(new Integer[] { null, randomNonNegativeInt() }), + randomFrom(new SimilarityMeasure[] { null, randomFrom(SimilarityMeasure.values()) }), + RateLimitSettingsTests.createRandom() + ); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java index 3d1b7f0c7499c..5b8c95edcc0ad 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java +++ 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/embeddings/AzureAiStudioEmbeddingsTaskSettingsTests.java @@ -7,13 +7,15 @@ package org.elasticsearch.xpack.inference.services.azureaistudio.embeddings; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.ValidationException; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.inference.services.azureaistudio.AzureAiStudioConstants; import org.hamcrest.MatcherAssert; @@ -23,7 +25,7 @@ import static org.hamcrest.Matchers.is; -public class AzureAiStudioEmbeddingsTaskSettingsTests extends ESTestCase { +public class AzureAiStudioEmbeddingsTaskSettingsTests extends AbstractBWCWireSerializationTestCase { public void testFromMap_WithUser() { assertEquals( @@ -98,4 +100,31 @@ public static Map getTaskSettingsMap(@Nullable String user) { } return map; } + + @Override + protected Writeable.Reader instanceReader() { + return AzureAiStudioEmbeddingsTaskSettings::new; + } + + @Override + protected AzureAiStudioEmbeddingsTaskSettings createTestInstance() { + return createRandom(); + } + + @Override + protected AzureAiStudioEmbeddingsTaskSettings mutateInstance(AzureAiStudioEmbeddingsTaskSettings instance) throws IOException { + return randomValueOtherThan(instance, AzureAiStudioEmbeddingsTaskSettingsTests::createRandom); + } + + @Override + protected AzureAiStudioEmbeddingsTaskSettings mutateInstanceForVersion( + AzureAiStudioEmbeddingsTaskSettings instance, + TransportVersion version + ) { + return instance; + } + + private static AzureAiStudioEmbeddingsTaskSettings createRandom() { 
+ return new AzureAiStudioEmbeddingsTaskSettings(randomFrom(new String[] { null, randomAlphaOfLength(15) })); + } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index de474ea1b4237..c3e8eb5c621d2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -394,7 +395,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getAzureOpenAiRequestTaskSettingsMap("user"), getAzureOpenAiSecretSettingsMap("secret", null) ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1159,25 +1160,22 @@ private Map getRequestConfigMap( ); } - private PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new 
PeristedConfig( + private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), null ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index 5b3cb9eade9de..e28ca71c30ff8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -58,6 +58,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -421,7 +422,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getTaskSettingsMap(null, null), getSecretSettingsMap("secret") ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1394,6 +1395,4 @@ private PersistedConfig getPersistedConfigMap(Map serviceSetting null ); } - - private record PersistedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java 
index 1cdd7997b96c0..45dd8ad7b33bd 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/googleaistudio/GoogleAiStudioServiceTests.java @@ -57,6 +57,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -963,7 +964,4 @@ private PersistedConfig getPersistedConfigMap(Map serviceSetting null ); } - - private record PersistedConfig(Map config, Map secrets) {} - } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java index a855437ce0738..de5c7ec83d57e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/huggingface/HuggingFaceServiceTests.java @@ -56,6 +56,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.core.inference.results.InferenceChunkedTextEmbeddingFloatResultsTests.asMapWithListsInsteadOfArrays; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; @@ -278,7 +279,7 @@ public void 
testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExists public void testParsePersistedConfigWithSecrets_DoesNotThrowWhenAnExtraKeyExistsInSecrets() throws IOException { try (var service = createHuggingFaceService()) { var persistedConfig = getPersistedConfigMap(getServiceSettingsMap("url"), getSecretSettingsMap("secret")); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -711,18 +712,15 @@ private Map getRequestConfigMap(Map serviceSetti return new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, builtServiceSettings)); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap(Map serviceSettings) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings) { return getPersistedConfigMap(serviceSettings, Map.of(), null); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( - Map serviceSettings, - @Nullable Map secretSettings - ) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings, @Nullable Map secretSettings) { return getPersistedConfigMap(serviceSettings, Map.of(), secretSettings); } - private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings @@ -730,11 +728,9 @@ private HuggingFaceServiceTests.PeristedConfig getPersistedConfigMap( var secrets = secretSettings == null ? 
null : new HashMap(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)); - return new HuggingFaceServiceTests.PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), secrets ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java index 508d5a97fe564..ba37203d9e5d6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/mistral/MistralServiceTests.java @@ -54,6 +54,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -586,28 +587,18 @@ private Map getRequestConfigMap( ); } - private record PeristedConfigRecord(Map config, Map secrets) {} - - private PeristedConfigRecord getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfigRecord( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfigRecord getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - - return new PeristedConfigRecord( - new 
HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), - null - ); - } - private static Map getEmbeddingsServiceSettingsMap( String model, @Nullable Integer dimensions, diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index e0e1ee3e81aef..2fc049dd3a5f6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -55,6 +55,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.xpack.inference.Utils.PersistedConfig; import static org.elasticsearch.xpack.inference.Utils.getInvalidModel; import static org.elasticsearch.xpack.inference.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.Utils.mockClusterServiceEmpty; @@ -480,7 +481,7 @@ public void testParsePersistedConfigWithSecrets_NotThrowWhenAnExtraKeyExistsInSe getTaskSettingsMap("user"), getSecretSettingsMap("secret") ); - persistedConfig.secrets.put("extra_key", "value"); + persistedConfig.secrets().put("extra_key", "value"); var model = service.parsePersistedConfigWithSecrets( "id", @@ -1308,25 +1309,23 @@ private Map getRequestConfigMap( ); } - private PeristedConfig getPersistedConfigMap( + private PersistedConfig getPersistedConfigMap( Map serviceSettings, Map taskSettings, Map secretSettings ) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), new HashMap<>(Map.of(ModelSecrets.SECRET_SETTINGS, secretSettings)) ); } - private PeristedConfig getPersistedConfigMap(Map 
serviceSettings, Map taskSettings) { + private PersistedConfig getPersistedConfigMap(Map serviceSettings, Map taskSettings) { - return new PeristedConfig( + return new PersistedConfig( new HashMap<>(Map.of(ModelConfigurations.SERVICE_SETTINGS, serviceSettings, ModelConfigurations.TASK_SETTINGS, taskSettings)), null ); } - - private record PeristedConfig(Map config, Map secrets) {} } diff --git a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java index 81abe3dc5c088..33efabf101be7 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/main/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapper.java @@ -19,7 +19,6 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -28,9 +27,10 @@ import org.elasticsearch.index.fielddata.ScriptDocValues.DoublesSupplier; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.index.mapper.CompositeSyntheticFieldLoader; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.IgnoredSourceFieldMapper; +import org.elasticsearch.index.mapper.IgnoreMalformedStoredValues; import org.elasticsearch.index.mapper.MappedFieldType; import 
org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; @@ -43,7 +43,6 @@ import org.elasticsearch.index.mapper.TimeSeriesParams; import org.elasticsearch.index.mapper.TimeSeriesParams.MetricType; import org.elasticsearch.index.mapper.ValueFetcher; -import org.elasticsearch.index.mapper.XContentDataHelper; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.script.ScriptCompiler; @@ -53,6 +52,7 @@ import org.elasticsearch.search.MultiValueMode; import org.elasticsearch.search.sort.BucketedSort; import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.xcontent.CopyingXContentParser; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentSubParser; @@ -592,9 +592,7 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio EnumMap metricsParsed = new EnumMap<>(Metric.class); // Preserves the content of the field in order to be able to construct synthetic source // if field value is malformed. - XContentBuilder malformedContentForSyntheticSource = context.mappingLookup().isSourceSynthetic() && ignoreMalformed - ? 
XContentBuilder.builder(context.parser().contentType().xContent()) - : null; + XContentBuilder malformedDataForSyntheticSource = null; try { token = context.parser().currentToken(); @@ -603,11 +601,14 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio return; } ensureExpectedToken(XContentParser.Token.START_OBJECT, token, context.parser()); - subParser = new XContentSubParser(context.parser()); - token = subParser.nextToken(); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.startObject(); + if (context.mappingLookup().isSourceSynthetic() && ignoreMalformed) { + var copyingParser = new CopyingXContentParser(context.parser()); + malformedDataForSyntheticSource = copyingParser.getBuilder(); + subParser = new XContentSubParser(copyingParser); + } else { + subParser = new XContentSubParser(context.parser()); } + token = subParser.nextToken(); while (token != XContentParser.Token.END_OBJECT) { // should be an object sub-field with name a metric name ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, subParser); @@ -621,9 +622,6 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } token = subParser.nextToken(); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.field(fieldName); - } // Make sure that the value is a number. 
Probably this will change when // new aggregate metric types are added (histogram, cardinality etc) ensureExpectedToken(XContentParser.Token.VALUE_NUMBER, token, subParser); @@ -632,9 +630,6 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio try { Number metricValue = delegateFieldMapper.value(context.parser()); metricsParsed.put(metric, metricValue); - if (malformedContentForSyntheticSource != null) { - malformedContentForSyntheticSource.value(metricValue); - } } catch (IllegalArgumentException e) { throw new IllegalArgumentException("failed to parse [" + metric.name() + "] sub field: " + e.getMessage(), e); } @@ -677,24 +672,20 @@ protected void parseCreateField(DocumentParserContext context) throws IOExceptio } } catch (Exception e) { if (ignoreMalformed) { - if (malformedContentForSyntheticSource != null) { - if (subParser != null) { - // Remaining data in parser needs to be stored as is in order to provide it in synthetic source. - XContentHelper.drainAndClose(subParser, malformedContentForSyntheticSource); - } else { - // We don't use DrainingXContentParser since we don't want to go beyond current field - malformedContentForSyntheticSource.copyCurrentStructure(context.parser()); - } - ; - var nameValue = IgnoredSourceFieldMapper.NameValue.fromContext( - context, - name(), - XContentDataHelper.encodeXContentBuilder(malformedContentForSyntheticSource) - ); - context.addIgnoredField(nameValue); - } else if (subParser != null) { + if (subParser != null) { // close the subParser, so we advance to the end of the object subParser.close(); + } else { + if (context.mappingLookup().isSourceSynthetic()) { + // There is a malformed value, but it is not an object (since subParser is null). + // So we just need to copy this single value. 
+ malformedDataForSyntheticSource = XContentBuilder.builder(context.parser().contentType().xContent()) + .copyCurrentStructure(context.parser()); + } + } + + if (malformedDataForSyntheticSource != null) { + context.doc().add(IgnoreMalformedStoredValues.storedField(name(), malformedDataForSyntheticSource)); } context.addIgnoredField(name()); @@ -724,11 +715,15 @@ protected SyntheticSourceMode syntheticSourceMode() { @Override public SourceLoader.SyntheticFieldLoader syntheticFieldLoader() { - // Note that malformed values are handled via `IgnoredSourceFieldMapper` infrastructure - return new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics); + return new CompositeSyntheticFieldLoader( + simpleName(), + name(), + new AggregateMetricSyntheticFieldLoader(name(), simpleName(), metrics), + new CompositeSyntheticFieldLoader.MalformedValuesLayer(name()) + ); } - public static class AggregateMetricSyntheticFieldLoader implements SourceLoader.SyntheticFieldLoader { + public static class AggregateMetricSyntheticFieldLoader implements CompositeSyntheticFieldLoader.SyntheticFieldLoaderLayer { private final String name; private final String simpleName; private final EnumSet metrics; @@ -746,6 +741,11 @@ public String fieldName() { return name; } + @Override + public long valueCount() { + return hasValue() ? 
1 : 0; + } + @Override public Stream> storedFieldLoaders() { return Stream.of(); @@ -779,7 +779,7 @@ public void write(XContentBuilder b) throws IOException { if (metricHasValue.isEmpty()) { return; } - b.startObject(simpleName); + b.startObject(); for (Map.Entry entry : metricDocValues.entrySet()) { if (metricHasValue.contains(entry.getKey())) { String metricName = entry.getKey().name(); diff --git a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java index f46508093c4ec..5fbc25eb037a7 100644 --- a/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java +++ b/x-pack/plugin/mapper-aggregate-metric/src/test/java/org/elasticsearch/xpack/aggregatemetric/mapper/AggregateDoubleMetricFieldMapperTests.java @@ -8,6 +8,8 @@ import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.Query; +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.LuceneDocument; @@ -20,6 +22,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; import org.elasticsearch.xpack.aggregatemetric.mapper.AggregateDoubleMetricFieldMapper.Metric; import org.hamcrest.Matchers; @@ -523,6 +526,43 @@ protected IngestScriptSupport ingestScriptSupport() { throw new AssumptionViolatedException("not supported"); } + public void testArrayValueSyntheticSource() throws 
Exception { + DocumentMapper mapper = createDocumentMapper( + syntheticSourceFieldMapping( + b -> b.field("type", CONTENT_TYPE) + .array("metrics", "min", "max") + .field("default_metric", "min") + .field("ignore_malformed", "true") + ) + ); + + var randomString = randomAlphaOfLength(10); + CheckedConsumer arrayValue = b -> { + b.startArray("field"); + { + b.startObject().field("max", 100).field("min", 10).endObject(); + b.startObject().field("max", 200).field("min", 20).endObject(); + b.value(randomString); + } + b.endArray(); + }; + + var expected = JsonXContent.contentBuilder().startObject(); + // First value comes from synthetic field loader and so is formatted in a specific format (e.g. min always come first). + // Other values are stored as is as part of ignore_malformed logic for synthetic source. + { + expected.startArray("field"); + expected.startObject().field("min", 10.0).field("max", 100.0).endObject(); + expected.startObject().field("max", 200).field("min", 20).endObject(); + expected.value(randomString); + expected.endArray(); + } + expected.endObject(); + + var syntheticSource = syntheticSource(mapper, arrayValue); + assertEquals(Strings.toString(expected), syntheticSource); + } + protected final class AggregateDoubleMetricSyntheticSourceSupport implements SyntheticSourceSupport { private final boolean malformedExample; private final EnumSet storedMetrics; diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 861d5a8c2f592..afd150be5fd4c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ 
-270,7 +270,7 @@ protected Set excludeTemplates() { } protected void cleanUpResources() { - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } protected void setUpgradeModeTo(boolean enabled) { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java index 19dad8db8ef01..1b1ad986bc8a1 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/TestFeatureResetIT.java @@ -134,7 +134,7 @@ public void testMLFeatureReset() throws Exception { createdPipelines.remove("feature_reset_inference_pipeline"); assertBusy(() -> assertThat(countInferenceProcessors(clusterAdmin().prepareState().get().getState()), equalTo(0))); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); assertThat(indices.toString(), indices, is(empty())); @@ -150,7 +150,7 @@ public void testMLFeatureResetFailureDueToPipelines() throws Exception { createdPipelines.add("feature_reset_failure_inference_pipeline"); Exception ex = expectThrows( Exception.class, - () -> client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet() + () -> client().execute(ResetFeatureStateAction.INSTANCE, new 
ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet() ); assertThat( ex.getMessage(), @@ -166,7 +166,7 @@ public void testMLFeatureResetFailureDueToPipelines() throws Exception { public void testMLFeatureResetWithModelDeployment() throws Exception { createModelDeployment(); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); assertBusy(() -> { List indices = Arrays.asList(client().admin().indices().prepareGetIndex().addIndices(".ml*").get().indices()); assertThat(indices.toString(), indices, is(empty())); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index 18cbf1728b0e4..c6e573fb3ea9c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -112,7 +112,7 @@ protected Collection> getPlugins() { public void tearDown() throws Exception { try { logger.trace("[{}#{}]: ML-specific after test cleanup", getTestClass().getSimpleName(), getTestName()); - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } finally { super.tearDown(); } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java index 277a395471cb5..12eeaf8732235 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java +++ 
b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistry.java @@ -77,7 +77,7 @@ public class MonitoringTemplateRegistry extends IndexTemplateRegistry { * writes monitoring data in ECS format as of 8.0. These templates define the ECS schema as well as alias fields for the old monitoring * mappings that point to the corresponding ECS fields. */ - public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 16; + public static final int STACK_MONITORING_REGISTRY_VERSION = 8_00_00_99 + 17; private static final String STACK_MONITORING_REGISTRY_VERSION_VARIABLE = "xpack.stack.monitoring.template.release.version"; private static final String STACK_TEMPLATE_VERSION = "8"; private static final String STACK_TEMPLATE_VERSION_VARIABLE = "xpack.stack.monitoring.template.version"; diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index a482cd2c364e2..79227f3dd2cee 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -46,7 +46,8 @@ public void testFeatureUsage() throws Exception { ArchiveFeatureSetUsage archiveUsage = (ArchiveFeatureSetUsage) usage.getUsage(); assertEquals(0, archiveUsage.getNumberOfArchiveIndices()); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = 
clusterAdmin().restoreSnapshot(req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); @@ -70,7 +71,8 @@ public void testFailRestoreOnInvalidLicense() throws Exception { ensureClusterSizeConsistency(); ensureClusterStateConsistency(); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet() @@ -84,7 +86,8 @@ public void testFailRestoreOnTooOldVersion() { TestRepositoryPlugin.FAKE_VERSIONS_TYPE, Settings.builder().put(getRepositoryOnMaster(repoName).getMetadata().settings()).put("version", Version.fromString("2.0.0").id) ); - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); SnapshotRestoreException e = expectThrows(SnapshotRestoreException.class, () -> clusterAdmin().restoreSnapshot(req).actionGet()); assertThat( e.getMessage(), @@ -94,7 +97,8 @@ public void testFailRestoreOnTooOldVersion() { // checks that shards are failed if license becomes invalid after successful restore public void testShardAllocationOnInvalidLicense() throws Exception { - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().restoreSnapshot(req).get(); 
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java index 3104c91f2e5f1..df86d94da5037 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveSettingValidationIntegTests.java @@ -19,7 +19,8 @@ public class ArchiveSettingValidationIntegTests extends AbstractArchiveTestCase { public void testCannotRemoveWriteBlock() throws ExecutionException, InterruptedException { - final RestoreSnapshotRequest req = new RestoreSnapshotRequest(repoName, snapshotName).indices(indexName).waitForCompletion(true); + final RestoreSnapshotRequest req = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, repoName, snapshotName).indices(indexName) + .waitForCompletion(true); final RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().restoreSnapshot(req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index ec7ca2ae5b681..7ce1da3a07917 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -239,7 +239,7 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - final Map snapshotShards = clusterAdmin().prepareSnapshotStatus(fsRepoName) + final Map snapshotShards = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, fsRepoName) .setSnapshots(snapshotInfo.snapshotId().getName()) .get() .getSnapshots() diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 13e5833b133d5..c738033761b3e 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -223,7 +223,10 @@ public void testCreateAndRestoreSearchableSnapshot() throws Exception { final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); - final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(fsRepoName).get().repositories().get(0); + final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, fsRepoName) + .get() + .repositories() + .get(0); assertThat(repositoryMetadata.name(), equalTo(fsRepoName)); 
assertThat(repositoryMetadata.uuid(), not(equalTo(RepositoryData.MISSING_UUID))); @@ -657,7 +660,7 @@ public void testSnapshotMountedIndexLeavesBlobsUntouched() throws Exception { final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId(); assertAcked(indicesAdmin().prepareDelete(indexName)); - final SnapshotStatus snapshotOneStatus = clusterAdmin().prepareSnapshotStatus(repositoryName) + final SnapshotStatus snapshotOneStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotOne.getName()) .get() .getSnapshots() @@ -669,7 +672,7 @@ public void testSnapshotMountedIndexLeavesBlobsUntouched() throws Exception { ensureGreen(indexName); final SnapshotId snapshotTwo = createSnapshot(repositoryName, "snapshot-2", List.of(indexName)).snapshotId(); - final SnapshotStatus snapshotTwoStatus = clusterAdmin().prepareSnapshotStatus(repositoryName) + final SnapshotStatus snapshotTwoStatus = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotTwo.getName()) .get() .getSnapshots() @@ -792,7 +795,12 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr createRepositoryNoVerify(tmpRepositoryName, "fs"); final Path repoPath = internalCluster().getCurrentMasterNodeInstance(Environment.class) .resolveRepoFile( - clusterAdmin().prepareGetRepositories(tmpRepositoryName).get().repositories().get(0).settings().get("location") + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, tmpRepositoryName) + .get() + .repositories() + .get(0) + .settings() + .get("location") ); initWithSnapshotVersion( tmpRepositoryName, @@ -803,12 +811,12 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr SnapshotsService.INDEX_GEN_IN_REPO_DATA_VERSION ) ); - assertAcked(clusterAdmin().prepareDeleteRepository(tmpRepositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, 
TEST_REQUEST_TIMEOUT, tmpRepositoryName)); createRepository(repositoryName, "fs", repoPath); } final SnapshotId snapshotOne = createSnapshot(repositoryName, "snapshot-1", List.of(indexName)).snapshotId(); - for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(repositoryName) + for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repositoryName) .setSnapshots(snapshotOne.getName()) .get() .getSnapshots()) { @@ -822,7 +830,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr assertAcked(indicesAdmin().prepareDelete(indexName)); assertThat( - clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0).uuid(), + clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName).get().repositories().get(0).uuid(), hasRepositoryUuid ? not(equalTo(RepositoryData.MISSING_UUID)) : equalTo(RepositoryData.MISSING_UUID) ); @@ -847,7 +855,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr logger.info("--> starting to take snapshot-2"); final SnapshotId snapshotTwo = createSnapshot(backupRepositoryName, "snapshot-2", List.of(restoredIndexName)).snapshotId(); logger.info("--> finished taking snapshot-2"); - for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(backupRepositoryName) + for (final SnapshotStatus snapshotStatus : clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, backupRepositoryName) .setSnapshots(snapshotTwo.getName()) .get() .getSnapshots()) { @@ -867,17 +875,23 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr final String restoreRepositoryName; if (hasRepositoryUuid && randomBoolean()) { // Re-mount the repository containing the actual data under a different name - final RepositoryMetadata repositoryMetadata = clusterAdmin().prepareGetRepositories(repositoryName).get().repositories().get(0); + final RepositoryMetadata 
repositoryMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, repositoryName) + .get() + .repositories() + .get(0); // Rename the repository containing the actual data. final String newRepositoryName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); - assertAcked(clusterAdmin().prepareDeleteRepository(repositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName)); final Settings.Builder settings = Settings.builder().put(repositoryMetadata.settings()); if (randomBoolean()) { settings.put(READONLY_SETTING_KEY, "true"); } assertAcked( - clusterAdmin().preparePutRepository(newRepositoryName).setType("fs").setSettings(settings).setVerify(randomBoolean()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, newRepositoryName) + .setType("fs") + .setSettings(settings) + .setVerify(randomBoolean()) ); restoreRepositoryName = backupRepositoryName.equals(repositoryName) ? newRepositoryName : backupRepositoryName; } else { @@ -886,7 +900,7 @@ public void testSnapshotOfSearchableSnapshotIncludesNoDataButCanBeRestored() thr logger.info("--> starting to restore snapshot-2"); assertThat( - clusterAdmin().prepareRestoreSnapshot(restoreRepositoryName, snapshotTwo.getName()) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, restoreRepositoryName, snapshotTwo.getName()) .setIndices(restoredIndexName) .get() .status(), @@ -967,13 +981,19 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste final SnapshotId backupSnapshot = createSnapshot(backupRepoName, "backup-snapshot", List.of(restoredIndexName)).snapshotId(); // Clear out data & the repo that contains it - final RepositoryMetadata dataRepoMetadata = clusterAdmin().prepareGetRepositories(dataRepoName).get().repositories().get(0); + final RepositoryMetadata dataRepoMetadata = clusterAdmin().prepareGetRepositories(TEST_REQUEST_TIMEOUT, dataRepoName) + .get() + 
.repositories() + .get(0); assertAcked(indicesAdmin().prepareDelete(restoredIndexName)); - assertAcked(clusterAdmin().prepareDeleteRepository(dataRepoName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, dataRepoName)); // Restore the backup snapshot assertThat( - clusterAdmin().prepareRestoreSnapshot(backupRepoName, backupSnapshot.getName()).setIndices(restoredIndexName).get().status(), + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, backupRepoName, backupSnapshot.getName()) + .setIndices(restoredIndexName) + .get() + .status(), equalTo(RestStatus.ACCEPTED) ); @@ -1011,7 +1031,11 @@ public void testSnapshotOfSearchableSnapshotCanBeRestoredBeforeRepositoryRegiste if (randomBoolean()) { settings.put(READONLY_SETTING_KEY, "true"); } - assertAcked(clusterAdmin().preparePutRepository(newRepositoryName).setType("fs").setSettings(settings)); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, newRepositoryName) + .setType("fs") + .setSettings(settings) + ); ensureGreen(restoredIndexName); assertTotalHits(restoredIndexName, originalAllHits, originalBarHits); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java index f97151f9ae330..a3da932398fb1 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsRepositoryIntegTests.java @@ -71,7 +71,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered } assertAcked( - 
clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType(FsRepository.TYPE) .setSettings( Settings.builder() @@ -86,12 +86,13 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered final String snapshotWithMountedIndices = snapshotName + "-with-mounted-indices"; createSnapshot(repositoryName, snapshotWithMountedIndices, Arrays.asList(mountedIndices)); assertAcked(indicesAdmin().prepareDelete(mountedIndices)); - assertAcked(clusterAdmin().prepareDeleteRepository(repositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName)); updatedRepositoryName = repositoryName + "-with-mounted-indices"; createRepository(updatedRepositoryName, FsRepository.TYPE, repositorySettings, randomBoolean()); final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, updatedRepositoryName, snapshotWithMountedIndices ).setWaitForCompletion(true).setIndices(mountedIndices).get(); @@ -103,7 +104,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered for (int i = 0; i < nbMountedIndices; i++) { RepositoryConflictException exception = expectThrows( RepositoryConflictException.class, - () -> clusterAdmin().prepareDeleteRepository(updatedRepositoryName).get() + () -> clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, updatedRepositoryName).get() ); assertThat( exception.getMessage(), @@ -118,7 +119,7 @@ public void testRepositoryUsedBySearchableSnapshotCanBeUpdatedButNotUnregistered assertAcked(indicesAdmin().prepareDelete(mountedIndices[i])); } - assertAcked(clusterAdmin().prepareDeleteRepository(updatedRepositoryName)); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, updatedRepositoryName)); } public void 
testMountIndexWithDeletionOfSnapshotFailsIfNotSingleIndexSnapshot() throws Exception { @@ -299,7 +300,7 @@ public void testRestoreSearchableSnapshotIndexConflicts() throws Exception { logger.info("--> restoring snapshot of searchable snapshot index [{}] should be conflicting", mountedIndex); final SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndex) + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshotOfMountedIndex) .setIndices(mountedIndex) .setWaitForCompletion(true) .get() @@ -360,7 +361,7 @@ public void testRestoreSearchableSnapshotIndexWithDifferentSettingsConflicts() t : randomSubsetOf(randomIntBetween(1, nbMountedIndices), mountedIndices); final SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndices) + () -> clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshotOfMountedIndices) .setIndices(restorables.toArray(String[]::new)) .setIndexSettings(deleteSnapshotIndexSettings(deleteSnapshot == false)) .setRenameReplacement("restored-with-different-setting-$1") @@ -380,7 +381,11 @@ public void testRestoreSearchableSnapshotIndexWithDifferentSettingsConflicts() t ) ); - final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(repository, snapshotOfMountedIndices) + final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot( + TEST_REQUEST_TIMEOUT, + repository, + snapshotOfMountedIndices + ) .setIndices(restorables.toArray(String[]::new)) .setIndexSettings(indexSettings) .setRenameReplacement("restored-with-same-setting-$1") diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java index 4f7c7f7aa0b74..0811ee86b3c32 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsResizeIntegTests.java @@ -55,8 +55,8 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { assertAcked(indicesAdmin().prepareDelete("mounted-*")); - assertAcked(clusterAdmin().prepareDeleteSnapshot("repository", "snapshot").get()); - assertAcked(clusterAdmin().prepareDeleteRepository("repository")); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repository", "snapshot").get()); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository")); super.tearDown(); } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java index c352c37ccadf8..3a90a2b23abc6 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsUuidValidationIntegTests.java @@ -113,7 +113,7 @@ public void testMountFailsIfSnapshotChanged() throws Exception { final RestoreBlockingActionFilter restoreBlockingActionFilter = getBlockingActionFilter(); restoreBlockingActionFilter.awaitExecution(); - 
assertAcked(clusterAdmin().prepareDeleteSnapshot(fsRepoName, snapshotName).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, fsRepoName, snapshotName).get()); createFullSnapshot(fsRepoName, snapshotName); assertFalse(responseFuture.isDone()); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java index 73e2e56b31ca5..a34bcd16c375b 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java @@ -150,14 +150,14 @@ private int createIndices() throws InterruptedException { private void createRepository(String name, String type) { assertAcked( - clusterAdmin().preparePutRepository(name) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, name) .setType(type) .setSettings(Settings.builder().put("location", randomRepoPath()).build()) ); } private void createSnapshot(String repository, String snapshot, int nbIndices) { - var snapshotInfo = clusterAdmin().prepareCreateSnapshot(repository, snapshot) + var snapshotInfo = clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repository, snapshot) .setIndices("index-*") .setIncludeGlobalState(false) .setWaitForCompletion(true) diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java index 56074f97650f0..40b7e08936fa3 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/SearchableSnapshotsBlobStoreCacheMaintenanceIntegTests.java @@ -242,7 +242,7 @@ public void testPeriodicMaintenance() throws Exception { assertAcked(systemClient().admin().indices().prepareDelete(SNAPSHOT_BLOB_CACHE_INDEX)); assertAcked(indicesAdmin().prepareDelete(indicesToDelete.toArray(String[]::new))); - assertAcked(clusterAdmin().prepareDeleteRepository("repo")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo")); ensureClusterStateConsistency(); assertThat(numberOfEntriesInCache(), equalTo(0L)); @@ -253,7 +253,7 @@ public void testPeriodicMaintenance() throws Exception { ); try { // restores the .snapshot-blob-cache index with - now obsolete - documents - final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("backup", "backup") + final RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "backup", "backup") // We only want to restore the blob cache index. Since we can't do that by name, specify an index that doesn't exist and // allow no indices - this way, only the indices resolved from the feature state will be resolved. 
.setIndices("this-index-doesnt-exist-i-know-because-#-is-illegal-in-index-names") @@ -394,7 +394,7 @@ private Map> mountRandomIndicesWithCache(String re } else { logger.info("--> mounted index [{}] did not generate any entry in cache", mountedIndex); - assertAcked(clusterAdmin().prepareDeleteSnapshot(repositoryName, snapshot).get()); + assertAcked(clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshot).get()); assertAcked(indicesAdmin().prepareDelete(mountedIndex)); } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java index 42542b63c80d1..ab38a89870500 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java @@ -153,14 +153,18 @@ public void testConcurrentPrewarming() throws Exception { } logger.debug("--> registering repository"); - assertAcked(clusterAdmin().preparePutRepository("repository").setType(FsRepository.TYPE).setSettings(repositorySettings.build())); + assertAcked( + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository") + .setType(FsRepository.TYPE) + .setSettings(repositorySettings.build()) + ); logger.debug("--> snapshotting indices"); - final CreateSnapshotResponse createSnapshotResponse = clusterAdmin().prepareCreateSnapshot("repository", "snapshot") - .setIncludeGlobalState(false) - .setIndices("index-*") - .setWaitForCompletion(true) - .get(); + final CreateSnapshotResponse createSnapshotResponse = 
clusterAdmin().prepareCreateSnapshot( + TEST_REQUEST_TIMEOUT, + "repository", + "snapshot" + ).setIncludeGlobalState(false).setIndices("index-*").setWaitForCompletion(true).get(); final int totalShards = shardsPerIndex.values().stream().mapToInt(i -> i).sum(); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(totalShards)); @@ -172,11 +176,14 @@ public void testConcurrentPrewarming() throws Exception { assertAcked(indicesAdmin().prepareDelete("index-*")); logger.debug("--> deleting repository"); - assertAcked(clusterAdmin().prepareDeleteRepository("repository")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository")); logger.debug("--> registering tracking repository"); assertAcked( - clusterAdmin().preparePutRepository("repository").setType("tracking").setVerify(false).setSettings(repositorySettings.build()) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repository") + .setType("tracking") + .setVerify(false) + .setSettings(repositorySettings.build()) ); TrackingRepositoryPlugin tracker = getTrackingRepositoryPlugin(); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java index 18e9a500a77ad..bdcce1e518700 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java @@ -237,7 +237,7 @@ protected void masterOperation( dataTierAllocationSetting.get(indexSettings); } - RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(repoName, 
snapName) + RestoreSnapshotRequest restoreSnapshotRequest = new RestoreSnapshotRequest(request.masterNodeTimeout(), repoName, snapName) // Restore the single index specified .indices(indexName) // Always rename it to the desired mounted index name @@ -253,8 +253,6 @@ protected void masterOperation( .includeAliases(false) // Pass through the wait-for-completion flag .waitForCompletion(request.waitForCompletion()) - // Pass through the master-node timeout - .masterNodeTimeout(request.masterNodeTimeout()) // Fail the restore if the snapshot found above is swapped out from under us before the restore happens .snapshotUuid(snapshotId.getUUID()) // Log snapshot restore at the DEBUG log level diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java index 532841ecf2172..f17a0552f5834 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ClusterPrivilegeIntegrationTests.java @@ -253,7 +253,9 @@ public void testThatSnapshotAndRestore() throws Exception { private void waitForSnapshotToFinish(String repo, String snapshot) throws Exception { assertBusy(() -> { - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) + .setSnapshots(snapshot) + .get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java index 74ffe762d980b..38687540e79f3 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureResetTests.java @@ -97,7 +97,7 @@ public void testFeatureResetManageRole() { } public void testFeatureResetNoManageRole() { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); + final ResetFeatureStateRequest req = new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT); client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("usr", SUPER_USER_PASSWD))) .admin() @@ -124,7 +124,7 @@ public void onFailure(Exception e) { } private void assertResetSuccessful(String user, SecureString password) { - final ResetFeatureStateRequest req = new ResetFeatureStateRequest(); + final ResetFeatureStateRequest req = new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT); client().filterWithHeader(Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue(user, password))) .admin() diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java index daea0e38c2c40..2ca799e94874c 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/SecurityFeatureStateIntegTests.java @@ -68,7 +68,7 @@ protected Settings nodeSettings() { public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { // set 
up a snapshot repository final String repositoryName = "test-repo"; - clusterAdmin().preparePutRepository(repositoryName) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, repositoryName) .setType("fs") .setSettings(Settings.builder().put("location", repositoryLocation)) .get(); @@ -105,7 +105,7 @@ public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { // snapshot state final String snapshotName = "security-state"; - clusterAdmin().prepareCreateSnapshot(repositoryName, snapshotName) + clusterAdmin().prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setIndices("test_index") .setFeatureStates("LocalStateSecurity") .get(); @@ -131,7 +131,7 @@ public void testSecurityFeatureStateSnapshotAndRestore() throws Exception { client().admin().indices().prepareClose("test_index").get(); // restore state - clusterAdmin().prepareRestoreSnapshot(repositoryName, snapshotName) + clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repositoryName, snapshotName) .setFeatureStates("LocalStateSecurity") .setIndices("test_index") .setWaitForCompletion(true) @@ -168,7 +168,9 @@ private Response performAuthenticatedRequest(Request request, String token) thro private void waitForSnapshotToFinish(String repo, String snapshot) throws Exception { assertBusy(() -> { - SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); + SnapshotsStatusResponse response = clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, repo) + .setSnapshots(snapshot) + .get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for // it to disappear from the cluster state as well diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 8692c999d8b35..3badd14ef8348 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -503,7 +503,7 @@ public void testSnapshotDeleteRestore() { ensureGreen(SECURITY_MAIN_ALIAS); logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("fs") .setSettings( Settings.builder() @@ -517,7 +517,7 @@ public void testSnapshotDeleteRestore() { SnapshotInfo snapshotInfo = client().filterWithHeader(Collections.singletonMap("Authorization", token)) .admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap-1") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIncludeGlobalState(false) .setFeatureStates(SECURITY_FEATURE_NAME) @@ -540,7 +540,7 @@ public void testSnapshotDeleteRestore() { GetRolesResponse getRolesResponse = new GetRolesRequestBuilder(client()).names("test_role").get(); assertThat(getRolesResponse.roles().length, is(0)); // restore - RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap-1") + RestoreSnapshotResponse response = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap-1") .setWaitForCompletion(true) .setIncludeAliases(randomBoolean()) // Aliases are always restored for system indices .setFeatureStates(SECURITY_FEATURE_NAME) @@ -566,7 +566,7 @@ public void testSnapshotDeleteRestore() { 
.prepareCreate("idx") .get(); assertThat(createIndexResponse.isAcknowledged(), is(true)); - assertAcked(clusterAdmin().prepareDeleteRepository("test-repo")); + assertAcked(clusterAdmin().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo")); } public void testAuthenticateWithDeletedRole() { diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java index f0858c81ac1c1..d04c2a4b0c578 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authz/SnapshotUserRoleIntegTests.java @@ -46,7 +46,9 @@ public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase { public void setupClusterBeforeSnapshot() throws IOException { logger.info("--> creating repository"); assertAcked( - clusterAdmin().preparePutRepository("repo").setType("fs").setSettings(Settings.builder().put("location", randomRepoPath())) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo") + .setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath())) ); logger.info("--> creating ordinary index"); @@ -68,7 +70,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { // view repositories final GetRepositoriesResponse getRepositoriesResponse = client.admin() .cluster() - .prepareGetRepositories(randomFrom("*", "_all")) + .prepareGetRepositories(TEST_REQUEST_TIMEOUT, randomFrom("*", "_all")) .get(); assertThat(getRepositoriesResponse.repositories().size(), is(1)); assertThat(getRepositoriesResponse.repositories().get(0).name(), is("repo")); @@ -83,7 +85,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { // create snapshot 
that includes restricted indices final CreateSnapshotResponse snapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("repo", "snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "repo", "snap") .setIndices(randomFrom("_all", "*")) .setIndicesOptions(IndicesOptions.strictExpandHidden()) .setWaitForCompletion(true) @@ -91,7 +93,7 @@ public void testSnapshotUserRoleCanSnapshotAndSeeAllIndices() { assertThat(snapshotResponse.getSnapshotInfo().state(), is(SnapshotState.SUCCESS)); assertThat(snapshotResponse.getSnapshotInfo().indices(), containsInAnyOrder(INTERNAL_SECURITY_MAIN_INDEX_7, ordinaryIndex)); // view snapshots for repo - final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots("repo").get(); + final GetSnapshotsResponse getSnapshotResponse = client.admin().cluster().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "repo").get(); assertThat(getSnapshotResponse.getSnapshots().size(), is(1)); assertThat(getSnapshotResponse.getSnapshots().get(0).snapshotId().getName(), is("snap")); assertThat(getSnapshotResponse.getSnapshots().get(0).indices(), containsInAnyOrder(INTERNAL_SECURITY_MAIN_INDEX_7, ordinaryIndex)); @@ -127,7 +129,7 @@ public void testSnapshotUserRoleUnathorizedForDestructiveActions() { assertThrowsAuthorizationException( () -> client.admin() .cluster() - .preparePutRepository("some_other_repo") + .preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "some_other_repo") .setType("fs") .setSettings(Settings.builder().put("location", randomRepoPath())) .get(), @@ -136,18 +138,24 @@ public void testSnapshotUserRoleUnathorizedForDestructiveActions() { ); // try delete repo assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareDeleteRepository("repo").get(), + () -> client.admin().cluster().prepareDeleteRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "repo").get(), "cluster:admin/repository/delete", "snapshot_user" ); // try fumble with snapshots 
assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareRestoreSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + () -> client.admin() + .cluster() + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)) + .get(), "cluster:admin/snapshot/restore", "snapshot_user" ); assertThrowsAuthorizationException( - () -> client.admin().cluster().prepareDeleteSnapshot("repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)).get(), + () -> client.admin() + .cluster() + .prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, "repo", randomAlphaOfLength(4).toLowerCase(Locale.ROOT)) + .get(), "cluster:admin/snapshot/delete", "snapshot_user" ); diff --git a/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json index c4c74f190ddb1..8d3881c73a95f 100644 --- a/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json +++ b/x-pack/plugin/security/src/test/resources/missing-version-security-index-template.json @@ -3,7 +3,6 @@ "order" : 1000, "settings" : { "number_of_shards" : 1, - "number_of_replicas" : 0, "auto_expand_replicas" : "0-all", "analysis" : { "filter" : { diff --git a/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java b/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java index 26923c545570a..ff091d432e546 100644 --- a/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java +++ b/x-pack/plugin/slm/qa/with-security/src/javaRestTest/java/org/elasticsearch/xpack/security/PermissionsIT.java @@ -83,7 +83,7 @@ public void testSLMWithPermissions() throws Exception { createUser("slm_admin", "slm-admin-password", "slm-manage"); createUser("slm_user", "slm-user-password", "slm-read"); - 
PutRepositoryRequest repoRequest = new PutRepositoryRequest(); + PutRepositoryRequest repoRequest = new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); Settings.Builder settingsBuilder = Settings.builder().put("location", "."); repoRequest.settings(settingsBuilder); repoRequest.name(repo); diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java index c68e7174923f8..d93f40f7c0a82 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMFileSettingsIT.java @@ -247,7 +247,7 @@ public void testSettingsApplied() throws Exception { } logger.info("--> create snapshot manually"); - var request = new CreateSnapshotRequest("repo", "file-snap").waitForCompletion(true); + var request = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT, "repo", "file-snap").waitForCompletion(true); var response = clusterAdmin().createSnapshot(request).get(); RestStatus status = response.getSnapshotInfo().status(); assertEquals(RestStatus.OK, status); @@ -273,7 +273,7 @@ public void testSettingsApplied() throws Exception { // Cancel/delete the snapshot try { - clusterAdmin().prepareDeleteSnapshot(REPO, snapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).get(); } catch (SnapshotMissingException e) { // ignore } diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index a64df7f871d97..1d797095c1f69 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ 
b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -150,7 +150,7 @@ public void testSnapshotInProgress() throws Exception { // Cancel/delete the snapshot try { - clusterAdmin().prepareDeleteSnapshot(REPO, snapshotName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, snapshotName).get(); } catch (SnapshotMissingException e) { // ignore } @@ -263,7 +263,7 @@ public void testRetentionWhileSnapshotInProgress() throws Exception { assertBusy(() -> { try { logger.info("--> cancelling snapshot {}", secondSnapName); - clusterAdmin().prepareDeleteSnapshot(REPO, secondSnapName).get(); + clusterAdmin().prepareDeleteSnapshot(TEST_REQUEST_TIMEOUT, REPO, secondSnapName).get(); } catch (ConcurrentSnapshotExecutionException e) { logger.info("--> attempted to stop second snapshot", e); // just wait and retry @@ -385,7 +385,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex logger.info("--> verify that snapshot [{}] is {}", failedSnapshotName.get(), expectedUnsuccessfulState); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -432,7 +432,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex assertBusy(() -> { final SnapshotInfo snapshotInfo; try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(successfulSnapshotName.get()) .get(); snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -446,7 +446,7 @@ private void testUnsuccessfulSnapshotRetention(boolean 
partialSuccess) throws Ex // Check that the failed snapshot from before still exists, now that retention has run { logger.info("--> verify that snapshot [{}] still exists", failedSnapshotName.get()); - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -465,7 +465,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex logger.info("--> waiting for {} snapshot [{}] to be deleted", expectedUnsuccessfulState, failedSnapshotName.get()); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(failedSnapshotName.get()) .get(); assertThat(snapshotsStatusResponse.getSnapshots(), empty()); @@ -478,7 +478,7 @@ private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Ex failedSnapshotName.get(), successfulSnapshotName.get() ); - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO) + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) .setSnapshots(successfulSnapshotName.get()) .get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -524,7 +524,7 @@ public void testSLMRetentionAfterRestore() throws Exception { }); logger.info("--> restoring index"); - RestoreSnapshotRequest restoreReq = new RestoreSnapshotRequest(REPO, snapshotName); + RestoreSnapshotRequest restoreReq = new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT, REPO, snapshotName); restoreReq.indices(indexName); restoreReq.renamePattern("(.+)"); restoreReq.renameReplacement("restored_$1"); @@ 
-542,7 +542,9 @@ public void testSLMRetentionAfterRestore() throws Exception { logger.info("--> waiting for {} snapshot to be deleted", snapshotName); assertBusy(() -> { try { - GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(REPO).setSnapshots(snapshotName).get(); + GetSnapshotsResponse snapshotsStatusResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, REPO) + .setSnapshots(snapshotName) + .get(); assertThat(snapshotsStatusResponse.getSnapshots(), empty()); } catch (SnapshotMissingException e) { // This is what we want to happen @@ -552,7 +554,7 @@ public void testSLMRetentionAfterRestore() throws Exception { } private SnapshotsStatusResponse getSnapshotStatus(String snapshotName) { - return clusterAdmin().prepareSnapshotStatus(REPO).setSnapshots(snapshotName).get(); + return clusterAdmin().prepareSnapshotStatus(TEST_REQUEST_TIMEOUT, REPO).setSnapshots(snapshotName).get(); } private void createAndPopulateIndex(String indexName) throws InterruptedException { diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java index e5e71a38ce6b4..61f83319ef7ad 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SnapshotLifecycleInitialisationTests.java @@ -61,7 +61,7 @@ protected Collection> getPlugins() { public void testSLMIsInRunningModeWhenILMIsDisabled() throws Exception { client().execute( TransportPutRepositoryAction.TYPE, - new PutRepositoryRequest().name("repo") + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).name("repo") .type("fs") .settings(Settings.builder().put("repositories.fs.location", repositoryLocation).build()) ).get(10, TimeUnit.SECONDS); 
diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index de24c1793d483..028633a480314 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -92,7 +92,7 @@ public static Optional maybeTakeSnapshot( Optional maybeMetadata = getSnapPolicyMetadata(jobId, clusterService.state()); String snapshotName = maybeMetadata.map(policyMetadata -> { // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node - CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest().masterNodeTimeout(TimeValue.MAX_VALUE); + CreateSnapshotRequest request = policyMetadata.getPolicy().toRequest(TimeValue.MAX_VALUE); final LifecyclePolicySecurityClient clientWithHeaders = new LifecyclePolicySecurityClient( client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 71afcb4548a06..fea84e1a032dd 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -309,8 +309,7 @@ void deleteSnapshot( // don't time out on this request to not produce failed SLM runs in case of a temporarily slow master node client.admin() .cluster() - .prepareDeleteSnapshot(repo, snapshot.getName()) - .setMasterNodeTimeout(TimeValue.MAX_VALUE) + .prepareDeleteSnapshot(TimeValue.MAX_VALUE, repo, snapshot.getName()) .execute(ActionListener.wrap(acknowledgedResponse -> { slmStats.snapshotDeleted(slmPolicy); listener.onResponse(acknowledgedResponse); diff --git 
a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index 7deee466d7292..9e5265d91dc75 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -42,11 +42,13 @@ public void testToRequest() { Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY ); - CreateSnapshotRequest request = p.toRequest(); - CreateSnapshotRequest expected = new CreateSnapshotRequest().userMetadata(Collections.singletonMap("policy", "id")); + CreateSnapshotRequest request = p.toRequest(TEST_REQUEST_TIMEOUT); + CreateSnapshotRequest expected = new CreateSnapshotRequest(TEST_REQUEST_TIMEOUT).userMetadata( + Collections.singletonMap("policy", "id") + ); p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 2099", "repo", null, null); - request = p.toRequest(); + request = p.toRequest(TEST_REQUEST_TIMEOUT); expected.waitForCompletion(true).snapshot(request.snapshot()).repository("repo"); assertEquals(expected, request); } diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index b9cde5d3a6b09..caae6dd393a0c 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -771,7 +771,7 @@ public void testRecoveryAfterRestoreUsesSnapshots() throws 
Exception { assertAcked(indicesAdmin().prepareDelete(indexName).get()); List restoredIndexDataNodes = internalCluster().startDataOnlyNodes(2); - RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(repoName, "snap") + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, repoName, "snap") .setIndices(indexName) .setIndexSettings( Settings.builder() @@ -1541,7 +1541,9 @@ private Store.MetadataSnapshot getMetadataSnapshot(String nodeName, String index } private long getSnapshotSizeForIndex(String repository, String snapshot, String index) { - GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(repository).addSnapshots(snapshot).get(); + GetSnapshotsResponse getSnapshotsResponse = clusterAdmin().prepareGetSnapshots(TEST_REQUEST_TIMEOUT, repository) + .addSnapshots(snapshot) + .get(); for (SnapshotInfo snapshotInfo : getSnapshotsResponse.getSnapshots()) { SnapshotInfo.IndexSnapshotDetails indexSnapshotDetails = snapshotInfo.indexSnapshotDetails().get(index); assertThat(indexSnapshotDetails, is(notNullValue())); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java index 45e63eb9ff31f..47b44f41f72d2 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java @@ -91,7 +91,10 @@ public void testRepositoryAnalysis() { } assertAcked( - 
clusterAdmin().preparePutRepository("test-repo").setVerify(false).setType(TestPlugin.ASSERTING_REPO_TYPE).setSettings(settings) + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") + .setVerify(false) + .setType(TestPlugin.ASSERTING_REPO_TYPE) + .setSettings(settings) ); final AssertingBlobStore blobStore = new AssertingBlobStore(settings.get(BASE_PATH_SETTING_KEY)); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml index b846dbe858f61..cc0e8aff9b239 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/aggregate-metrics/100_synthetic_source.yml @@ -76,7 +76,6 @@ aggregate_metric_double with ignore_malformed: index: index: test id: "1" - refresh: true body: metric: min: 18.2 @@ -88,11 +87,22 @@ aggregate_metric_double with ignore_malformed: value_count: 50 - do: - search: + index: + index: test + id: "2" + body: + metric: ["hey", {"value_count": 1, "min": 18.2,"max": 100}, [123, 456]] + + - do: + indices.refresh: {} + + - do: + get: index: test + id: "1" - match: - hits.hits.0._source: + _source: metric: min: 18.2 max: 100 @@ -102,3 +112,12 @@ aggregate_metric_double with ignore_malformed: field: "field" value_count: 50 + - do: + get: + index: test + id: "2" + + - match: + _source: + metric: [{"min": 18.2,"max": 100.0, "value_count": 1}, "hey", 123, 456] + diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml new file mode 100644 index 0000000000000..fc2e22d857358 --- /dev/null +++ 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz_api_keys/30_field_level_security_synthetic_source.yml @@ -0,0 +1,362 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + +--- +Filter single field: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + secret: + type: keyword + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "secret":"squirrel"}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read" ] + field_security: + grant: [ "name" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + - match: { hits.total.value: 1 } + - match: { hits.total.relation: "eq" } + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: squirrel } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.total.value: 1 } + - match: { hits.total.relation: "eq" } + - match: { hits.hits.0._source.name: A } + - is_false: "hits.hits.0._source.secret" + +--- +Filter fields in object: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + properties: + secret: + type: keyword + public: + type: keyword + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object":{ "secret":"mission", "public":"interest" }}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ 
"index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.public: interest } + - is_false: "_source.object.secret" + + +--- +Fields under a disabled object - uses _ignored_source: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + enabled: false + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object": [ { "secret":"mission1", "public":"interest1" }, { "secret":"mission2", "public":"interest2" } ] }' + - '{"create": { }}' + - '{"name": "B", "object": { "secret":"mission", "public":"interest" } }' + - '{"create": { }}' + - '{"name": "C", "object": { "foo":"bar", "public":"interest" } }' + - '{"create": { }}' + - '{"name": "D", "object": [10, 20, 30, 40] }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.secret: mission1 } + - match: { hits.hits.0._source.object.0.public: interest1 } + - match: { hits.hits.0._source.object.1.secret: mission2 } + - match: { hits.hits.0._source.object.1.public: interest2 } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.object.secret: mission } + - match: { hits.hits.1._source.object.public: interest } + - match: { hits.hits.2._source.name: C } + - match: { hits.hits.2._source.object.foo: bar } + - match: { hits.hits.2._source.object.public: interest } + - match: { hits.hits.3._source.name: D } + - match: { hits.hits.3._source.object: [ 10, 20, 30, 40] } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.0.public: interest1 } + - match: { hits.hits.0._source.object.1.public: interest2 } + - is_false: "hits.hits.0._source.object.0.secret" + - is_false: "hits.hits.0._source.object.1.secret" + - match: { hits.hits.1._source.name: "B" } + - match: { hits.hits.1._source.object.public: interest } + - is_false: "hits.hits.1._source.object.secret" + - match: { hits.hits.2._source.name: C } + - match: { hits.hits.2._source.object.foo: bar } + - match: { hits.hits.2._source.object.public: interest } + - match: { hits.hits.3._source.name: D } + - match: { hits.hits.3._source.object: [ 10, 20, 30, 40 ] } + + +--- +Dynamic fields beyond limit - uses _ignored_source: + - do: + indices.create: + index: index_fls + body: + settings: + index: + mapping: + total_fields: + ignore_dynamic_beyond_limit: true + limit: 2 + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + object: + type: object + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "object":{ 
"secret":"mission", "public":"interest" }}' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read", "monitor" ] + field_security: + grant: [ "*" ] + except: [ "object.secret" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... + - do: + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.secret: mission } + - match: { hits.hits.0._source.object.public: interest } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.object.public: interest } + - is_false: "hits.hits.0._source.object.secret" + + +--- +Field with ignored_malformed: + - do: + indices.create: + index: index_fls + body: + mappings: + _source: + mode: synthetic + properties: + name: + type: keyword + secret: + type: integer + ignore_malformed: true + + - do: + bulk: + index: index_fls + refresh: true + body: + - '{"create": { }}' + - '{"name": "A", "secret":"squirrel"}' + - '{"create": { }}' + - '{"name": "B", "secret": [ 10, "squirrel", 20] }' + - match: { errors: false } + + - do: + security.create_api_key: + body: + name: "test-fls" + expiration: "1d" + role_descriptors: + index_access: + indices: + - names: [ "index_fls" ] + privileges: [ "read" ] + field_security: + grant: [ "name" ] + - match: { name: "test-fls" } + - is_true: id + - set: + id: api_key_id + encoded: credentials + + # With superuser... 
+ - do: + search: + index: index_fls + sort: name + - match: { hits.hits.0._source.name: A } + - match: { hits.hits.0._source.secret: squirrel } + - match: { hits.hits.1._source.name: B } + - match: { hits.hits.1._source.secret: [ 10, 20, "squirrel"] } + + # With FLS API Key + - do: + headers: + Authorization: "ApiKey ${credentials}" + search: + index: index_fls + - match: { hits.hits.0._source.name: A } + - is_false: "hits.hits.0._source.secret" + - match: { hits.hits.1._source.name: B } + - is_false: "hits.hits.1._source.secret" diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java index 603b37d3e41f3..33d4d17367673 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/TransformSingleNodeTestCase.java @@ -40,7 +40,7 @@ protected Settings nodeSettings() { @After public void cleanup() { - client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest()).actionGet(); + client().execute(ResetFeatureStateAction.INSTANCE, new ResetFeatureStateRequest(TEST_REQUEST_TIMEOUT)).actionGet(); } protected void assertAsync( diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index 2876fc50e036f..58e6e736b1207 100644 --- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ 
b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -186,7 +186,7 @@ public void testBasicSnapshotRestoreWorkFlow() { final String nonDedicatedVotingOnlyNode = internalCluster().startNode(dataContainingVotingOnlyNodeSettings); assertAcked( - clusterAdmin().preparePutRepository("test-repo") + clusterAdmin().preparePutRepository(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "test-repo") .setType("verifyaccess-fs") .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) ); @@ -195,7 +195,11 @@ public void testBasicSnapshotRestoreWorkFlow() { createIndex("test-idx-3"); ensureGreen(); - VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository("test-repo").get(); + VerifyRepositoryResponse verifyResponse = clusterAdmin().prepareVerifyRepository( + TEST_REQUEST_TIMEOUT, + TEST_REQUEST_TIMEOUT, + "test-repo" + ).get(); // only the da assertEquals(3, verifyResponse.getNodes().size()); assertTrue(verifyResponse.getNodes().stream().noneMatch(nw -> nw.getName().equals(dedicatedVotingOnlyNode))); @@ -207,7 +211,7 @@ public void testBasicSnapshotRestoreWorkFlow() { Client client = client(); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot("test-repo", "test-snap") + .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .setIndices(indicesToSnapshot) .get(); @@ -219,7 +223,7 @@ public void testBasicSnapshotRestoreWorkFlow() { List snapshotInfos = client.admin() .cluster() - .prepareGetSnapshots("test-repo") + .prepareGetSnapshots(TEST_REQUEST_TIMEOUT, "test-repo") .setSnapshots(randomFrom("test-snap", "_all", "*", "*-snap", "test*")) .get() .getSnapshots(); @@ -234,7 +238,7 @@ public void testBasicSnapshotRestoreWorkFlow() { logger.info("--> restore all indices from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse 
= client.admin() .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") + .prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, "test-repo", "test-snap") .setWaitForCompletion(true) .get(); assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); diff --git a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java index a332bcb599e90..c8c72855eaf7a 100644 --- a/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java +++ b/x-pack/qa/repository-old-versions/src/test/java/org/elasticsearch/oldrepos/OldRepositoryAccessIT.java @@ -199,7 +199,10 @@ private void beforeRestart( } Request createRepo = new Request("PUT", "/_snapshot/" + repoName); createRepo.setJsonEntity( - Strings.toString(new PutRepositoryRequest().type(sourceOnlyRepository ? "source" : "fs").settings(repoSettingsBuilder.build())) + Strings.toString( + new PutRepositoryRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).type(sourceOnlyRepository ? "source" : "fs") + .settings(repoSettingsBuilder.build()) + ) ); assertAcknowledged(client().performRequest(createRepo)); @@ -279,7 +282,9 @@ private void restoreMountAndVerify( // restore index Request restoreRequest = new Request("POST", "/_snapshot/" + repoName + "/" + snapshotName + "/_restore"); restoreRequest.setJsonEntity( - Strings.toString(new RestoreSnapshotRequest().indices(indexName).renamePattern("(.+)").renameReplacement("restored_$1")) + Strings.toString( + new RestoreSnapshotRequest(TEST_REQUEST_TIMEOUT).indices(indexName).renamePattern("(.+)").renameReplacement("restored_$1") + ) ); restoreRequest.addParameter("wait_for_completion", "true"); Response restoreResponse = client().performRequest(restoreRequest);