diff --git a/CHANGELOG.md b/CHANGELOG.md index 14c148e05ddc1..53296c629afef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ### Added - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) +- Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) - Add failover support with Segment Replication enabled. ([#4325](https://github.com/opensearch-project/OpenSearch/pull/4325) +- Point in time rest layer changes for list PIT and PIT segments API ([#4388](https://github.com/opensearch-project/OpenSearch/pull/4388)) - Added @dreamer-89 as an Opensearch maintainer ([#4342](https://github.com/opensearch-project/OpenSearch/pull/4342)) - Added release notes for 1.3.5 ([#4343](https://github.com/opensearch-project/OpenSearch/pull/4343)) - Added release notes for 2.2.1 ([#4344](https://github.com/opensearch-project/OpenSearch/pull/4344)) @@ -46,6 +48,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Getting security exception due to access denied 'java.lang.RuntimePermission' 'accessDeclaredMembers' when trying to get snapshot with S3 IRSA ([#4469](https://github.com/opensearch-project/OpenSearch/pull/4469)) - Fixed flaky test `ResourceAwareTasksTests.testTaskIdPersistsInThreadContext` ([#4484](https://github.com/opensearch-project/OpenSearch/pull/4484)) - Fixed the ignore_malformed setting to also ignore objects ([#4494](https://github.com/opensearch-project/OpenSearch/pull/4494)) +- Fixed the `_cat/shards/10_basic.yml` test cases fix. 
- Updated jackson to 2.13.4 and snakeyml to 1.32 ([#4556](https://github.com/opensearch-project/OpenSearch/pull/4556)) - Fixed day of year defaulting for round up parser ([#4627](https://github.com/opensearch-project/OpenSearch/pull/4627)) - Fixed the SnapshotsInProgress error during index deletion ([#4570](https://github.com/opensearch-project/OpenSearch/pull/4570)) @@ -59,6 +62,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Add timing data and more granular stages to SegmentReplicationState ([#4367](https://github.com/opensearch-project/OpenSearch/pull/4367)) - BWC version 2.2.2 ([#4385](https://github.com/opensearch-project/OpenSearch/pull/4385)) +- Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) - BWC version 1.3.6 ([#4452](https://github.com/opensearch-project/OpenSearch/pull/4452)) - Bump current version to 2.4.0 on 2.x branch ([#4454](https://github.com/opensearch-project/OpenSearch/pull/4454)) - 2.3.0 release notes ([#4457](https://github.com/opensearch-project/OpenSearch/pull/4457)) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index c3a33bf9756e0..b3144a75d1445 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; 
import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +94,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +436,19 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + /** + * Merging search responses as part of CCS flow to reduce roundtrips is not supported for point in time - + * refer to org.opensearch.action.search.SearchResponseMerger + */ + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ -464,6 +477,31 @@ static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOExcep return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, 
Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits() { + return new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + } + + static Request getAllPits() { + return new Request(HttpGet.METHOD_NAME, "/_search/point_in_time/_all"); + } + static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, "/_msearch"); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 28a441bdf7f7f..0a5880b778942 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,11 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import 
org.opensearch.action.search.GetAllPitNodesResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -1250,6 +1255,154 @@ public final Cancellable scrollAsync( ); } + /** + * Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete point in time searches using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete all point in time searches using delete all PITs API + * + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deleteAllPitsAsync(RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.deleteAllPits(), + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Get all point in time searches using list all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final GetAllPitNodesResponse getAllPits(RequestOptions options) throws IOException { + return performRequestAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.getAllPits(), + options, + GetAllPitNodesResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously get all point in time searches using list all PITs API + * + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable getAllPitsAsync(RequestOptions options, ActionListener listener) { + return performRequestAsyncAndParseEntity( + new MainRequest(), + (request) -> RequestConverters.getAllPits(), + options, + GetAllPitNodesResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. 
* diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..cbb4db10cd519 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.OpenSearchStatusException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + 
doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreateAndDeletePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse createPitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(createPitResponse.getId() != null); + assertEquals(1, createPitResponse.getTotalShards()); + assertEquals(1, createPitResponse.getSuccessfulShards()); + assertEquals(0, createPitResponse.getFailedShards()); + assertEquals(0, createPitResponse.getSkippedShards()); + GetAllPitNodesResponse getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); + List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); + assertTrue(pits.contains(createPitResponse.getId())); + List pitIds = new ArrayList<>(); + pitIds.add(createPitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(createPitResponse.getId())); + } + + public void testDeleteAllAndListAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, 
TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + DeletePitResponse deletePitResponse = highLevelClient().deleteAllPits(RequestOptions.DEFAULT); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + GetAllPitNodesResponse getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); + + List pits = getAllPitResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); + assertTrue(pits.contains(pitResponse.getId())); + assertTrue(pits.contains(pitResponse1.getId())); + ActionListener deletePitListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + assertTrue(deletePitInfo.isSuccessful()); + } + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("Delete all failed"); + } + } + }; + final CreatePitResponse pitResponse3 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + ActionListener getPitsListener = new ActionListener() { + @Override + public void onResponse(GetAllPitNodesResponse response) { + List pits = response.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()); + 
assertTrue(pits.contains(pitResponse3.getId())); + } + + @Override + public void onFailure(Exception e) { + if (!(e instanceof OpenSearchStatusException)) { + throw new AssertionError("List all PITs failed", e); + } + } + }; + highLevelClient().getAllPitsAsync(RequestOptions.DEFAULT, getPitsListener); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + // validate no pits case + getAllPitResponse = highLevelClient().getAllPits(RequestOptions.DEFAULT); + assertTrue(getAllPitResponse.getPitInfos().size() == 0); + highLevelClient().deleteAllPitsAsync(RequestOptions.DEFAULT, deletePitListener); + } +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 0037f490d7d46..c9f5257d64d98 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,8 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +133,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1306,47 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + 
String[] indices = randomIndicesNames(0, 5); + Map expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeletePit() throws IOException { + List pitIdsList = new ArrayList<>(); + pitIdsList.add("pitId1"); + pitIdsList.add("pitId2"); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIdsList); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + Request request = RequestConverters.deleteAllPits(); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a 
random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 3da0f81023f72..ad8da7244eae0 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -134,6 +134,8 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { // core "ping", "info", + "delete_all_pits", + "get_all_pits", // security "security.get_ssl_certificates", "security.authenticate", diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..8b509e5d19e92 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,10 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +93,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import 
org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -100,11 +105,13 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +769,46 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + 
assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest searchRequest1 = new SearchRequest("index1"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..d3a2104c01bc0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,44 @@ + +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/{index}/_search/point_in_time", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "allow_partial_pit_creation":{ + "type":"boolean", + "description":"Allow if point in time can be created with partial failures" + }, + "keep_alive":{ + "type":"string", + "description":"Specify the keep alive for point in time" + }, + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json new file mode 100644 index 0000000000000..5ff01aa746df9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -0,0 +1,19 @@ +{ + "delete_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes all active point in time searches." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json new file mode 100644 index 0000000000000..b54d9f76204f4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -0,0 +1,23 @@ +{ + "delete_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes one or more point in time searches based on the IDs passed." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json new file mode 100644 index 0000000000000..544a8cb11b002 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get_all_pits.json @@ -0,0 +1,19 @@ +{ + "get_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Lists all active point in time searches." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "GET" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index aa4abc7a11eae..189215b6562a3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,11 +1,14 @@ --- "Help": - skip: - version: " - 7.99.99" - reason: shard path stats were added in 8.0.0 + version: " - 2.3.99" + reason: point in time stats were added in 2.4.0 + features: node_selector - do: cat.shards: help: true + node_selector: + version: "2.4.0 - " - match: $body: | @@ -67,6 +70,9 @@ search.scroll_current .+ \n search.scroll_time .+ \n search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n segments.count .+ \n segments.memory .+ \n segments.index_writer_memory .+ \n @@ -82,6 +88,92 @@ path.state .+ \n $/ --- +"Help before - 2.4.0": + - skip: + version: "2.4.0 - " + reason: point in time stats were added in 2.4.0 + features: node_selector + - 
do: + cat.shards: + help: true + node_selector: + version: " - 2.3.99" + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + $/ +--- "Test cat shards output": - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..84ff85f465f8b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 +1,142 @@ +"Create PIT, Search with PIT ID and Delete": + - skip: + version: " - 2.3.99" + reason: "mode to be introduced later than 2.4" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + get_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.keep_alive: 82800000 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + +--- +"Delete all": + - skip: + version: " - 2.3.99" + reason: "mode to be introduced later than 2.4" + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + 
indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {pit_id: pit_id} + - match: { _shards.failed: 0} + + - do: + get_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.keep_alive: 82800000 } + + - do: + delete_all_pits: {} + + - match: {pits.0.pit_id: $pit_id} + - match: {pits.0.successful: true } + + - do: + catch: missing + delete_all_pits: { } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 926e21294ffc8..f33543e1114cb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,15 +32,21 @@ package org.opensearch.search.searchafter; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; @@ -155,6 +161,58 @@ public void testsShouldFail() throws Exception { } } + public void testPitWithSearchAfter() throws Exception { 
+ assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); + ensureGreen(); + indexRandom( + true, + client().prepareIndex("test").setId("0").setSource("field1", 0), + client().prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto"), + client().prepareIndex("test").setId("2").setSource("field1", 101), + client().prepareIndex("test").setId("3").setSource("field1", 99) + ); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 99 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(2, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 100 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(1, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + /** + * Add new data and assert PIT results remain the same and normal search results gets refreshed + */ + indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + sr = 
client().prepareSearch().addSort("field1", SortOrder.ASC).setQuery(matchAllQuery()).searchAfter(new Object[] { 0 }).get(); + assertEquals(4, sr.getHits().getHits().length); + client().admin().indices().prepareDelete("test").get(); + } + public void testWithNullStrings() throws InterruptedException { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index 9c735c42052e3..eacbcc42a8157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -32,9 +32,13 @@ package org.opensearch.search.slice; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; @@ -46,6 +50,7 @@ import org.opensearch.search.Scroll; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.test.OpenSearchIntegTestCase; @@ -86,7 +91,12 @@ private void setupIndex(int numDocs, int numberOfShards) throws IOException, Exe client().admin() .indices() .prepareCreate("test") - .setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000)) + .setSettings( + Settings.builder() + 
.put("number_of_shards", numberOfShards) + .put("index.max_slices_per_scroll", 10000) + .put("index.max_slices_per_pit", 10000) + ) .setMapping(mapping) ); ensureGreen(); @@ -129,6 +139,78 @@ public void testSearchSort() throws Exception { } } + public void testSearchSortWithoutPitOrScroll() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + SliceBuilder sliceBuilder = new SliceBuilder("_id", 0, 4); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> request.slice(sliceBuilder).get()); + assertTrue(ex.getMessage().contains("all shards failed")); + } + + public void testSearchSortWithPIT() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); + CreatePitRequest pitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + pitRequest.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, pitRequest); + CreatePitResponse pitResponse = execute.get(); + for (String field : new String[] { "_id", "random_int", "static_int" }) { + int fetchSize = randomIntBetween(10, 100); + + // test _doc sort + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + + // test numeric sort + request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + 
.addSort(SortBuilders.fieldSort("random_int")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + } + client().admin().indices().prepareDelete("test").get(); + } + + private void assertSearchSlicesWithPIT(SearchRequestBuilder request, String field, int numSlice, int numDocs) { + int totalResults = 0; + List keys = new ArrayList<>(); + for (int id = 0; id < numSlice; id++) { + SliceBuilder sliceBuilder = new SliceBuilder(field, id, numSlice); + SearchResponse searchResponse = request.slice(sliceBuilder).setFrom(0).get(); + totalResults += searchResponse.getHits().getHits().length; + int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; + int numSliceResults = searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + while (searchResponse.getHits().getHits().length > 0) { + searchResponse = request.setFrom(numSliceResults).slice(sliceBuilder).get(); + totalResults += searchResponse.getHits().getHits().length; + numSliceResults += searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + } + assertThat(numSliceResults, equalTo(expectedSliceResults)); + } + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet(keys).size(), equalTo(numDocs)); + } + public void testWithPreferenceAndRoutings() throws Exception { int numShards = 10; int totalDocs = randomIntBetween(100, 1000); @@ -217,7 +299,7 @@ public void testInvalidQuery() throws Exception { ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); - assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context")); + assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context or PIT context")); } private void 
assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 0155ddb6c5eb0..b8cbba930951a 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -165,7 +165,9 @@ import org.opensearch.action.admin.indices.rollover.RolloverAction; import org.opensearch.action.admin.indices.rollover.TransportRolloverAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; import org.opensearch.action.admin.indices.segments.TransportIndicesSegmentsAction; +import org.opensearch.action.admin.indices.segments.TransportPitSegmentsAction; import org.opensearch.action.admin.indices.settings.get.GetSettingsAction; import org.opensearch.action.admin.indices.settings.get.TransportGetSettingsAction; import org.opensearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; @@ -234,10 +236,16 @@ import org.opensearch.action.main.MainAction; import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.search.ClearScrollAction; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.MultiSearchAction; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; +import org.opensearch.action.search.TransportCreatePitAction; +import org.opensearch.action.search.TransportDeletePitAction; +import org.opensearch.action.search.TransportGetAllPitsAction; import org.opensearch.action.search.TransportMultiSearchAction; import 
org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -375,6 +383,7 @@ import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; import org.opensearch.rest.action.cat.RestNodesAction; +import org.opensearch.rest.action.cat.RestPitSegmentsAction; import org.opensearch.rest.action.cat.RestPluginsAction; import org.opensearch.rest.action.cat.RestRepositoriesAction; import org.opensearch.rest.action.cat.RestSegmentsAction; @@ -400,7 +409,10 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; +import org.opensearch.rest.action.search.RestGetAllPitsAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.rest.action.search.RestSearchScrollAction; @@ -664,6 +676,12 @@ public void reg // Remote Store actions.register(RestoreRemoteStoreAction.INSTANCE, TransportRestoreRemoteStoreAction.class); + // point in time actions + actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); + actions.register(PitSegmentsAction.INSTANCE, TransportPitSegmentsAction.class); + actions.register(GetAllPitsAction.INSTANCE, TransportGetAllPitsAction.class); + return unmodifiableMap(actions.getRegistry()); } @@ -835,6 +853,13 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time 
API + registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + registerHandler.accept(new RestGetAllPitsAction(nodesInCluster)); + registerHandler.accept(new RestPitSegmentsAction(nodesInCluster)); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java new file mode 100644 index 0000000000000..b52ef32a91b16 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsAction.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionType; + +/** + * Action for retrieving segment information for PITs + */ +public class PitSegmentsAction extends ActionType { + + public static final PitSegmentsAction INSTANCE = new PitSegmentsAction(); + public static final String NAME = "indices:monitor/point_in_time/segments"; + + private PitSegmentsAction() { + super(NAME, IndicesSegmentResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java new file mode 100644 index 0000000000000..de0d390cddc4a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.broadcast.BroadcastRequest; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Transport request for retrieving PITs segment information + */ +public class PitSegmentsRequest extends BroadcastRequest { + private boolean verbose = false; + private final List pitIds = new ArrayList<>(); + + public PitSegmentsRequest() { + this(Strings.EMPTY_ARRAY); + } + + public PitSegmentsRequest(StreamInput in) throws IOException { + super(in); + pitIds.addAll(Arrays.asList(in.readStringArray())); + verbose = in.readBoolean(); + } + + public PitSegmentsRequest(String... pitIds) { + super(pitIds); + this.pitIds.addAll(Arrays.asList(pitIds)); + } + + /** + * true if detailed information about each segment should be returned, + * false otherwise. + */ + public boolean isVerbose() { + return verbose; + } + + /** + * Sets the verbose option. + * @see #isVerbose() + */ + public void setVerbose(boolean v) { + verbose = v; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable((pitIds == null) ? 
null : pitIds.toArray(new String[pitIds.size()])); + out.writeBoolean(verbose); + } + + public List getPitIds() { + return Collections.unmodifiableList(pitIds); + } + + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pitIds == null || pitIds.isEmpty()) { + validationException = addValidationError("no pit ids specified", validationException); + } + return validationException; + } + + public void fromXContent(XContentParser parser) throws IOException { + pitIds.clear(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Malformed content, must start with an object"); + } else { + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("pit_id".equals(currentFieldName)) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id array element should only contain PIT identifier"); + } + pitIds.add(parser.text()); + } + } else { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id element should only contain PIT identifier"); + } + pitIds.add(parser.text()); + } + } else { + throw new IllegalArgumentException( + "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " + ); + } + } + } + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java new file mode 100644 
index 0000000000000..9d4ece74a7270 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java @@ -0,0 +1,261 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ +package org.opensearch.action.admin.indices.segments; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.ListPitInfo; +import org.opensearch.action.search.PitService; +import org.opensearch.action.search.SearchContextId; +import org.opensearch.action.search.SearchContextIdForNode; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.DefaultShardOperationFailedException; +import org.opensearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.routing.AllocationId; +import org.opensearch.cluster.routing.PlainShardsIterator; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.ShardsIterator; +import org.opensearch.cluster.routing.UnassignedInfo; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import 
org.opensearch.search.SearchService; +import org.opensearch.search.internal.PitReaderContext; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.opensearch.action.search.SearchContextId.decode; + +/** + * Transport action for retrieving segment information of PITs + */ +public class TransportPitSegmentsAction extends TransportBroadcastByNodeAction { + private final ClusterService clusterService; + private final IndicesService indicesService; + private final SearchService searchService; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportService transportService; + private final PitService pitService; + + @Inject + public TransportPitSegmentsAction( + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SearchService searchService, + NamedWriteableRegistry namedWriteableRegistry, + PitService pitService + ) { + super( + PitSegmentsAction.NAME, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + PitSegmentsRequest::new, + ThreadPool.Names.MANAGEMENT + ); + this.clusterService = clusterService; + this.indicesService = indicesService; + this.searchService = searchService; + this.namedWriteableRegistry = namedWriteableRegistry; + this.transportService = transportService; + this.pitService = pitService; + } + + /** + * Execute PIT segments flow for all PITs or request PIT IDs + */ + @Override + protected void doExecute(Task task, PitSegmentsRequest request, ActionListener listener) { + List pitIds = request.getPitIds(); + if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + 
pitService.getAllPits(ActionListener.wrap(response -> { + request.clearAndSetPitIds(response.getPitInfos().stream().map(ListPitInfo::getPitId).collect(Collectors.toList())); + super.doExecute(task, request, listener); + }, listener::onFailure)); + } else { + super.doExecute(task, request, listener); + } + } + + /** + * This adds list of shards on which we need to retrieve pit segments details + * @param clusterState the cluster state + * @param request the underlying request + * @param concreteIndices the concrete indices on which to execute the operation + */ + @Override + protected ShardsIterator shards(ClusterState clusterState, PitSegmentsRequest request, String[] concreteIndices) { + final ArrayList iterators = new ArrayList<>(); + for (String pitId : request.getPitIds()) { + SearchContextId searchContext = decode(namedWriteableRegistry, pitId); + for (Map.Entry entry : searchContext.shards().entrySet()) { + final SearchContextIdForNode perNode = entry.getValue(); + // check if node is part of local cluster + if (Strings.isEmpty(perNode.getClusterAlias())) { + final ShardId shardId = entry.getKey(); + iterators.add( + new PitAwareShardRouting( + pitId, + shardId, + perNode.getNode(), + null, + true, + ShardRoutingState.STARTED, + null, + null, + null, + -1L + ) + ); + } + } + } + return new PlainShardsIterator(iterators); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, PitSegmentsRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, PitSegmentsRequest countRequest, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); + } + + @Override + protected ShardSegments readShardResult(StreamInput in) throws IOException { + return new ShardSegments(in); + } + + @Override + protected IndicesSegmentResponse newResponse( + 
PitSegmentsRequest request, + int totalShards, + int successfulShards, + int failedShards, + List results, + List shardFailures, + ClusterState clusterState + ) { + return new IndicesSegmentResponse( + results.toArray(new ShardSegments[results.size()]), + totalShards, + successfulShards, + failedShards, + shardFailures + ); + } + + @Override + protected PitSegmentsRequest readRequestFrom(StreamInput in) throws IOException { + return new PitSegmentsRequest(in); + } + + @Override + public List getShardRoutingsFromInputStream(StreamInput in) throws IOException { + return in.readList(PitAwareShardRouting::new); + } + + /** + * This retrieves segment details of PIT context + * @param request the node-level request + * @param shardRouting the shard on which to execute the operation + */ + @Override + protected ShardSegments shardOperation(PitSegmentsRequest request, ShardRouting shardRouting) { + assert shardRouting instanceof PitAwareShardRouting; + PitAwareShardRouting pitAwareShardRouting = (PitAwareShardRouting) shardRouting; + SearchContextIdForNode searchContextIdForNode = decode(namedWriteableRegistry, pitAwareShardRouting.getPitId()).shards() + .get(shardRouting.shardId()); + PitReaderContext pitReaderContext = searchService.getPitReaderContext(searchContextIdForNode.getSearchContextId()); + if (pitReaderContext == null) { + return new ShardSegments(shardRouting, Collections.emptyList()); + } + return new ShardSegments(pitReaderContext.getShardRouting(), pitReaderContext.getSegments()); + } + + /** + * This holds PIT id which is used to perform broadcast operation in PIT shards to retrieve segments information + */ + public class PitAwareShardRouting extends ShardRouting { + + private final String pitId; + + public PitAwareShardRouting(StreamInput in) throws IOException { + super(in); + this.pitId = in.readString(); + } + + public PitAwareShardRouting( + String pitId, + ShardId shardId, + String currentNodeId, + String relocatingNodeId, + boolean primary, + 
ShardRoutingState state, + RecoverySource recoverySource, + UnassignedInfo unassignedInfo, + AllocationId allocationId, + long expectedShardSize + ) { + super( + shardId, + currentNodeId, + relocatingNodeId, + primary, + state, + recoverySource, + unassignedInfo, + allocationId, + expectedShardSize + ); + this.pitId = pitId; + } + + public String getPitId() { + return pitId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(pitId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + super.toXContent(builder, params); + builder.field("pit_id", pitId); + return builder.endObject(); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitAction.java b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java new file mode 100644 index 0000000000000..7ffa30a182458 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for creating PIT reader context + */ +public class CreatePitAction extends ActionType { + public static final CreatePitAction INSTANCE = new CreatePitAction(); + public static final String NAME = "indices:data/read/point_in_time/create"; + + private CreatePitAction() { + super(NAME, CreatePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java new file mode 100644 index 0000000000000..745139fd1f1e8 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -0,0 +1,329 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.tasks.Task; +import org.opensearch.transport.Transport; + +import 
java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; + +/** + * Controller for creating PIT reader context + * Phase 1 of create PIT request : Create PIT reader contexts in the associated shards with a temporary keep alive + * Phase 2 of create PIT : Update PIT reader context with PIT ID and keep alive from request and + * fail user request if any of the updates in this phase are failed - we clean up PITs in case of such failures. + * This two phase approach is used to save PIT ID as part of context which is later used for other use cases like list PIT etc. + */ +public class CreatePitController { + private final SearchTransportService searchTransportService; + private final ClusterService clusterService; + private final TransportSearchAction transportSearchAction; + private final NamedWriteableRegistry namedWriteableRegistry; + private final PitService pitService; + private static final Logger logger = LogManager.getLogger(CreatePitController.class); + public static final Setting PIT_INIT_KEEP_ALIVE = Setting.positiveTimeSetting( + "point_in_time.init.keep_alive", + timeValueSeconds(30), + Setting.Property.NodeScope + ); + + @Inject + public CreatePitController( + SearchTransportService searchTransportService, + ClusterService clusterService, + TransportSearchAction transportSearchAction, + NamedWriteableRegistry namedWriteableRegistry, + PitService pitService + ) { + this.searchTransportService = searchTransportService; + this.clusterService = clusterService; + this.transportSearchAction = transportSearchAction; + this.namedWriteableRegistry = namedWriteableRegistry; + this.pitService = pitService; + } + + /** + * This method creates PIT reader context + */ + public 
void executeCreatePit( + CreatePitRequest request, + Task task, + StepListener createPitListener, + ActionListener updatePitIdListener + ) { + SearchRequest searchRequest = new SearchRequest(request.getIndices()); + searchRequest.preference(request.getPreference()); + searchRequest.routing(request.getRouting()); + searchRequest.indicesOptions(request.getIndicesOptions()); + searchRequest.allowPartialSearchResults(request.shouldAllowPartialPitCreation()); + SearchTask searchTask = searchRequest.createTask( + task.getId(), + task.getType(), + task.getAction(), + task.getParentTaskId(), + Collections.emptyMap() + ); + /** + * This is needed for cross cluster functionality to work with PITs and current ccsMinimizeRoundTrips is + * not supported for point in time + */ + searchRequest.setCcsMinimizeRoundtrips(false); + /** + * Phase 1 of create PIT + */ + executeCreatePit(searchTask, searchRequest, createPitListener); + + /** + * Phase 2 of create PIT where we update pit id in pit contexts + */ + createPitListener.whenComplete( + searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); }, + updatePitIdListener::onFailure + ); + } + + /** + * Creates PIT reader context with temporary keep alive + */ + void executeCreatePit(Task task, SearchRequest searchRequest, StepListener createPitListener) { + logger.debug( + () -> new ParameterizedMessage("Executing creation of PIT context for indices [{}]", Arrays.toString(searchRequest.indices())) + ); + transportSearchAction.executeRequest( + task, + searchRequest, + TransportCreatePitAction.CREATE_PIT_ACTION, + true, + new TransportSearchAction.SinglePhaseSearchAction() { + @Override + public void executeOnShardTarget( + SearchTask searchTask, + SearchShardTarget target, + Transport.Connection connection, + ActionListener searchPhaseResultActionListener + ) { + searchTransportService.createPitContext( + connection, + new TransportCreatePitAction.CreateReaderContextRequest( + 
target.getShardId(), + PIT_INIT_KEEP_ALIVE.get(clusterService.getSettings()) + ), + searchTask, + ActionListener.wrap(r -> searchPhaseResultActionListener.onResponse(r), searchPhaseResultActionListener::onFailure) + ); + } + }, + createPitListener + ); + } + + /** + * Updates PIT ID, keep alive and createdTime of PIT reader context + */ + void executeUpdatePitId( + CreatePitRequest request, + SearchRequest searchRequest, + SearchResponse searchResponse, + ActionListener updatePitIdListener + ) { + logger.debug( + () -> new ParameterizedMessage( + "Updating PIT context with PIT ID [{}], creation time and keep alive", + searchResponse.pointInTimeId() + ) + ); + /** + * store the create time ( same create time for all PIT contexts across shards ) to be used + * for list PIT api + */ + final long relativeStartNanos = System.nanoTime(); + final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider( + searchRequest.getOrCreateAbsoluteStartMillis(), + relativeStartNanos, + System::nanoTime + ); + final long creationTime = timeProvider.getAbsoluteStartMillis(); + CreatePitResponse createPITResponse = new CreatePitResponse( + searchResponse.pointInTimeId(), + creationTime, + searchResponse.getTotalShards(), + searchResponse.getSuccessfulShards(), + searchResponse.getSkippedShards(), + searchResponse.getFailedShards(), + searchResponse.getShardFailures() + ); + SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, createPITResponse.getId()); + final StepListener> lookupListener = getConnectionLookupListener(contextId); + lookupListener.whenComplete(nodelookup -> { + final ActionListener groupedActionListener = getGroupedListener( + updatePitIdListener, + createPITResponse, + contextId.shards().size(), + contextId.shards().values() + ); + for (Map.Entry entry : contextId.shards().entrySet()) { + DiscoveryNode node = nodelookup.apply(entry.getValue().getClusterAlias(), entry.getValue().getNode()); + if (node 
== null) { + node = this.clusterService.state().getNodes().get(entry.getValue().getNode()); + } + if (node == null) { + logger.error( + () -> new ParameterizedMessage( + "Create pit update phase for PIT ID [{}] failed " + "because node [{}] not found", + searchResponse.pointInTimeId(), + entry.getValue().getNode() + ) + ); + groupedActionListener.onFailure( + new OpenSearchException( + "Create pit update phase for PIT ID [" + + searchResponse.pointInTimeId() + + "] failed because node[" + + entry.getValue().getNode() + + "] " + + "not found" + ) + ); + return; + } + try { + final Transport.Connection connection = searchTransportService.getConnection(entry.getValue().getClusterAlias(), node); + searchTransportService.updatePitContext( + connection, + new UpdatePitContextRequest( + entry.getValue().getSearchContextId(), + createPITResponse.getId(), + request.getKeepAlive().millis(), + creationTime + ), + groupedActionListener + ); + } catch (Exception e) { + String nodeName = node.getName(); + logger.error( + () -> new ParameterizedMessage( + "Create pit update phase failed for PIT ID [{}] on node [{}]", + searchResponse.pointInTimeId(), + nodeName + ), + e + ); + groupedActionListener.onFailure( + new OpenSearchException( + "Create pit update phase for PIT ID [" + searchResponse.pointInTimeId() + "] failed on node[" + node + "]", + e + ) + ); + } + } + }, updatePitIdListener::onFailure); + } + + private StepListener> getConnectionLookupListener(SearchContextId contextId) { + ClusterState state = clusterService.state(); + final Set clusters = contextId.shards() + .values() + .stream() + .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) + .map(SearchContextIdForNode::getClusterAlias) + .collect(Collectors.toSet()); + return (StepListener>) SearchUtils.getConnectionLookupListener( + searchTransportService.getRemoteClusterService(), + state, + clusters + ); + } + + private ActionListener getGroupedListener( + ActionListener updatePitIdListener, + 
CreatePitResponse createPITResponse, + int size, + Collection contexts + ) { + return new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + updatePitIdListener.onResponse(createPITResponse); + } + + @Override + public void onFailure(final Exception e) { + cleanupContexts(contexts, createPITResponse.getId()); + updatePitIdListener.onFailure(e); + } + }, size); + } + + /** + * Cleanup all created PIT contexts in case of failure + */ + private void cleanupContexts(Collection contexts, String pitId) { + ActionListener deleteListener = new ActionListener<>() { + @Override + public void onResponse(DeletePitResponse response) { + // this is invoke and forget call + final StringBuilder failedPitsStringBuilder = new StringBuilder(); + response.getDeletePitResults() + .stream() + .filter(r -> !r.isSuccessful()) + .forEach(r -> failedPitsStringBuilder.append(r.getPitId()).append(",")); + logger.warn(() -> new ParameterizedMessage("Failed to delete PIT IDs {}", failedPitsStringBuilder.toString())); + if (logger.isDebugEnabled()) { + final StringBuilder successfulPitsStringBuilder = new StringBuilder(); + response.getDeletePitResults() + .stream() + .filter(r -> r.isSuccessful()) + .forEach(r -> successfulPitsStringBuilder.append(r.getPitId()).append(",")); + logger.debug(() -> new ParameterizedMessage("Deleted PIT with IDs {}", successfulPitsStringBuilder.toString())); + } + } + + @Override + public void onFailure(Exception e) { + logger.error("Cleaning up PIT contexts failed ", e); + } + }; + Map> nodeToContextsMap = new HashMap<>(); + for (SearchContextIdForNode context : contexts) { + List contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); + contextIdsForNode.add(new PitSearchContextIdForNode(pitId, context)); + nodeToContextsMap.put(context.getNode(), contextIdsForNode); + } + pitService.deletePitContexts(nodeToContextsMap, deleteListener); + } +} diff --git 
a/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java new file mode 100644 index 0000000000000..45d6d9e2c9f54 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitRequest.java @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * A request to create a point in time against one or more indices. 
+ */ +public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent { + + // keep alive for pit reader context + private TimeValue keepAlive; + + // this describes whether PIT can be created with partial failures + private Boolean allowPartialPitCreation; + @Nullable + private String routing = null; + @Nullable + private String preference = null; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; + + public CreatePitRequest(TimeValue keepAlive, Boolean allowPartialPitCreation, String... indices) { + this.keepAlive = keepAlive; + this.allowPartialPitCreation = allowPartialPitCreation; + this.indices = indices; + } + + public CreatePitRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + routing = in.readOptionalString(); + preference = in.readOptionalString(); + keepAlive = in.readTimeValue(); + allowPartialPitCreation = in.readOptionalBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeOptionalString(routing); + out.writeOptionalString(preference); + out.writeTimeValue(keepAlive); + out.writeOptionalBoolean(allowPartialPitCreation); + } + + public String getRouting() { + return routing; + } + + public String getPreference() { + return preference; + } + + public String[] getIndices() { + return indices; + } + + public IndicesOptions getIndicesOptions() { + return indicesOptions; + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + /** + * Sets if this request should allow partial results. 
+ */ + public void allowPartialPitCreation(Boolean allowPartialPitCreation) { + this.allowPartialPitCreation = allowPartialPitCreation; + } + + public boolean shouldAllowPartialPitCreation() { + return allowPartialPitCreation; + } + + public void setRouting(String routing) { + this.routing = routing; + } + + public void setPreference(String preference) { + this.preference = preference; + } + + public void setIndices(String[] indices) { + this.indices = indices; + } + + public void setIndicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (keepAlive == null) { + validationException = addValidationError("keep alive not specified", validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public CreatePitRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + public void setKeepAlive(TimeValue keepAlive) { + this.keepAlive = keepAlive; + } + + public final String buildDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("indices["); + Strings.arrayToDelimitedString(indices, ",", sb); + sb.append("], "); + sb.append("pointintime[").append(keepAlive).append("], "); + sb.append("allowPartialPitCreation[").append(allowPartialPitCreation).append("], "); + return sb.toString(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new Task(id, type, action, this.buildDescription(), parentTaskId, headers); + } + + private void validateIndices(String... 
indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + } + + @Override + public CreatePitRequest indices(String... indices) { + validateIndices(indices); + this.indices = indices; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("keep_alive", keepAlive); + builder.field("allow_partial_pit_creation", allowPartialPitCreation); + if (indices != null) { + builder.startArray("indices"); + for (String index : indices) { + builder.value(index); + } + builder.endArray(); + } + if (indicesOptions != null) { + indicesOptions.toXContent(builder, params); + } + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java new file mode 100644 index 0000000000000..dd197a37f8616 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -0,0 +1,232 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; +import org.opensearch.rest.action.RestActions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Create point in time response with point in time id and shard success / failures + */ +public class CreatePitResponse extends ActionResponse implements StatusToXContentObject { + private static final ParseField ID = new ParseField("pit_id"); + private static final ParseField CREATION_TIME = new ParseField("creation_time"); + + // point in time id + private final String id; + private final int totalShards; + private final int successfulShards; + private final int failedShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + private final long creationTime; + + public CreatePitResponse(StreamInput in) throws IOException { + super(in); + id = in.readString(); + totalShards = in.readVInt(); + successfulShards = in.readVInt(); + failedShards = in.readVInt(); + skippedShards = in.readVInt(); + creationTime = in.readLong(); + int size = in.readVInt(); + if (size == 0) { + shardFailures = ShardSearchFailure.EMPTY_ARRAY; + } else { + shardFailures = new ShardSearchFailure[size]; + for (int i = 0; i < shardFailures.length; i++) { + shardFailures[i] = ShardSearchFailure.readShardSearchFailure(in); + } + } + } + + public CreatePitResponse( + String id, + long creationTime, + int totalShards, + int successfulShards, + int skippedShards, + int failedShards, + 
ShardSearchFailure[] shardFailures + ) { + this.id = id; + this.creationTime = creationTime; + this.totalShards = totalShards; + this.successfulShards = successfulShards; + this.skippedShards = skippedShards; + this.failedShards = failedShards; + this.shardFailures = shardFailures; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + RestActions.buildBroadcastShardsHeader( + builder, + params, + getTotalShards(), + getSuccessfulShards(), + getSkippedShards(), + getFailedShards(), + getShardFailures() + ); + builder.field(CREATION_TIME.getPreferredName(), creationTime); + builder.endObject(); + return builder; + } + + /** + * Parse the create PIT response body into a new {@link CreatePitResponse} object + */ + public static CreatePitResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + parser.nextToken(); + return innerFromXContent(parser); + } + + public static CreatePitResponse innerFromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName = parser.currentName(); + int successfulShards = -1; + int totalShards = -1; + int skippedShards = 0; + int failedShards = 0; + String id = null; + long creationTime = 0; + List failures = new ArrayList<>(); + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (CREATION_TIME.match(currentFieldName, parser.getDeprecationHandler())) { + creationTime = parser.longValue(); + } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { + id = parser.text(); + } else { + 
parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + failedShards = parser.intValue(); // we don't need it but need to consume it + } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + failures.add(ShardSearchFailure.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } + } + + return new CreatePitResponse( + id, + creationTime, + totalShards, + successfulShards, + skippedShards, + failedShards, + failures.toArray(ShardSearchFailure.EMPTY_ARRAY) + ); + } + + public long getCreationTime() { + return creationTime; + } + + /** + * The failed number of shards the search was executed on. + */ + public int getFailedShards() { + return shardFailures.length; + } + + /** + * The failures that occurred during the search. 
+ */ + public ShardSearchFailure[] getShardFailures() { + return this.shardFailures; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(id); + out.writeVInt(totalShards); + out.writeVInt(successfulShards); + out.writeVInt(failedShards); + out.writeVInt(skippedShards); + out.writeLong(creationTime); + out.writeVInt(shardFailures.length); + for (ShardSearchFailure shardSearchFailure : shardFailures) { + shardSearchFailure.writeTo(out); + } + } + + public String getId() { + return id; + } + + /** + * The total number of shards the create pit operation was executed on. + */ + public int getTotalShards() { + return totalShards; + } + + /** + * The successful number of shards the create pit operation was executed on. + */ + public int getSuccessfulShards() { + return successfulShards; + } + + public int getSkippedShards() { + return skippedShards; + } + + @Override + public RestStatus status() { + return RestStatus.status(successfulShards, totalShards, shardFailures); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitAction.java b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java new file mode 100644 index 0000000000000..aa305ecfe73ab --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for deleting point in time searches + */ +public class DeletePitAction extends ActionType { + + public static final DeletePitAction INSTANCE = new DeletePitAction(); + public static final String NAME = "indices:data/read/point_in_time/delete"; + + private DeletePitAction() { + super(NAME, DeletePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java new file mode 100644 index 0000000000000..5a167c5a6f160 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This class captures if deletion of pit is successful along with pit id + */ +public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent { + /** + * This will be true if PIT reader contexts are deleted and also if contexts are not found. 
+ */ + private final boolean successful; + + private final String pitId; + + public DeletePitInfo(boolean successful, String pitId) { + this.successful = successful; + this.pitId = pitId; + } + + public DeletePitInfo(StreamInput in) throws IOException { + successful = in.readBoolean(); + pitId = in.readString(); + + } + + public boolean isSuccessful() { + return successful; + } + + public String getPitId() { + return pitId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(successful); + out.writeString(pitId); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_info", + true, + args -> new DeletePitInfo((boolean) args[0], (String) args[1]) + ); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("successful")); + PARSER.declareString(constructorArg(), new ParseField("pit_id")); + } + + private static final ParseField SUCCESSFUL = new ParseField("successful"); + private static final ParseField PIT_ID = new ParseField("pit_id"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SUCCESSFUL.getPreferredName(), successful); + builder.field(PIT_ID.getPreferredName(), pitId); + builder.endObject(); + return builder; + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java new file mode 100644 index 0000000000000..926e9c19a33f5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -0,0 +1,126 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Request to delete one or more PIT search contexts based on IDs. + */ +public class DeletePitRequest extends ActionRequest implements ToXContentObject { + + /** + * List of PIT IDs to be deleted , and use "_all" to delete all PIT reader contexts + */ + private final List pitIds = new ArrayList<>(); + + public DeletePitRequest(StreamInput in) throws IOException { + super(in); + pitIds.addAll(Arrays.asList(in.readStringArray())); + } + + public DeletePitRequest(String... 
pitIds) { + this.pitIds.addAll(Arrays.asList(pitIds)); + } + + public DeletePitRequest(List pitIds) { + this.pitIds.addAll(pitIds); + } + + public void clearAndSetPitIds(List pitIds) { + this.pitIds.clear(); + this.pitIds.addAll(pitIds); + } + + public DeletePitRequest() {} + + public List getPitIds() { + return pitIds; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pitIds == null || pitIds.isEmpty()) { + validationException = addValidationError("no pit ids specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (pitIds == null) { + out.writeVInt(0); + } else { + out.writeStringArray(pitIds.toArray(new String[pitIds.size()])); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("pit_id"); + for (String pitId : pitIds) { + builder.value(pitId); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public void fromXContent(XContentParser parser) throws IOException { + pitIds.clear(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Malformed content, must start with an object"); + } else { + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("pit_id".equals(currentFieldName)) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id array element should only contain pit_id"); + } + pitIds.add(parser.text()); + } + } else { + if 
(token.isValue() == false) { + throw new IllegalArgumentException("pit_id element should only contain pit_id"); + } + pitIds.add(parser.text()); + } + } else { + throw new IllegalArgumentException( + "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " + ); + } + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java new file mode 100644 index 0000000000000..cdbeb3dc2b749 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.rest.RestStatus.NOT_FOUND; +import static org.opensearch.rest.RestStatus.OK; + +/** + * Response class for delete pits flow which clears the point in time search contexts + */ +public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { + + private final List deletePitResults; + + public DeletePitResponse(List 
deletePitResults) { + this.deletePitResults = deletePitResults; + } + + public DeletePitResponse(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + deletePitResults = new ArrayList<>(); + for (int i = 0; i < size; i++) { + deletePitResults.add(new DeletePitInfo(in)); + } + + } + + public List getDeletePitResults() { + return deletePitResults; + } + + /** + * @return Whether the attempt to delete PIT was successful. + */ + @Override + public RestStatus status() { + if (deletePitResults.isEmpty()) return NOT_FOUND; + return OK; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletePitResults.size()); + for (DeletePitInfo deletePitResult : deletePitResults) { + deletePitResult.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("pits"); + for (DeletePitInfo response : deletePitResults) { + response.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_response", + true, + (Object[] parsedObjects) -> { + @SuppressWarnings("unchecked") + List deletePitInfoList = (List) parsedObjects[0]; + return new DeletePitResponse(deletePitInfoList); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), DeletePitInfo.PARSER, new ParseField("pits")); + } + + public static DeletePitResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java new file mode 100644 index 0000000000000..c90f75e3c0aed --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -0,0 
+1,35 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Inner node get all pits request + */ +public class GetAllPitNodeRequest extends BaseNodeRequest { + + public GetAllPitNodeRequest() { + super(); + } + + public GetAllPitNodeRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java new file mode 100644 index 0000000000000..ba308a1a6ea1e --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeResponse.java @@ -0,0 +1,69 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +/** + * Inner node get all pits response + */ +public class GetAllPitNodeResponse extends BaseNodeResponse implements ToXContentFragment { + + /** + * List of active PITs in the associated node + */ + private final List pitInfos; + + public GetAllPitNodeResponse(DiscoveryNode node, List pitInfos) { + super(node); + if (pitInfos == null) { + throw new IllegalArgumentException("Pits info cannot be null"); + } + this.pitInfos = Collections.unmodifiableList(pitInfos); + } + + public GetAllPitNodeResponse(StreamInput in) throws IOException { + super(in); + this.pitInfos = Collections.unmodifiableList(in.readList(ListPitInfo::new)); + } + + public List getPitInfos() { + return pitInfos; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(pitInfos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("node", this.getNode().getName()); + builder.startArray("pitInfos"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + + builder.endArray(); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java new file mode 100644 index 0000000000000..b4ad2f6641087 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesRequest.java @@ -0,0 +1,37 
@@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.support.nodes.BaseNodesRequest; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request to get all active PIT IDs from all nodes of cluster + */ +public class GetAllPitNodesRequest extends BaseNodesRequest { + + @Inject + public GetAllPitNodesRequest(DiscoveryNode... concreteNodes) { + super(concreteNodes); + } + + public GetAllPitNodesRequest(StreamInput in) throws IOException { + super(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java new file mode 100644 index 0000000000000..610520a4c1f9d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodesResponse.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterName; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This class transforms active PIT objects from all nodes to unique PIT objects + */ +public class GetAllPitNodesResponse extends BaseNodesResponse implements ToXContentObject { + + /** + * List of unique PITs across all nodes + */ + private final Set pitInfos = new HashSet<>(); + + public GetAllPitNodesResponse(StreamInput in) throws IOException { + super(in); + } + + public GetAllPitNodesResponse( + ClusterName clusterName, + List getAllPitNodeResponseList, + List failures + ) { + super(clusterName, getAllPitNodeResponseList, failures); + Set uniquePitIds = new HashSet<>(); + pitInfos.addAll( + getAllPitNodeResponseList.stream() + .flatMap(p -> p.getPitInfos().stream().filter(t -> uniquePitIds.add(t.getPitId()))) + .collect(Collectors.toList()) + ); + } + + /** + * Copy constructor that explicitly sets the list pit infos + */ + public GetAllPitNodesResponse(List listPitInfos, GetAllPitNodesResponse response) { + super(response.getClusterName(), response.getNodes(), response.failures()); + pitInfos.addAll(listPitInfos); + } + + public GetAllPitNodesResponse( + List listPitInfos, + ClusterName 
clusterName, + List getAllPitNodeResponseList, + List failures + ) { + super(clusterName, getAllPitNodeResponseList, failures); + pitInfos.addAll(listPitInfos); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("pits"); + for (ListPitInfo pit : pitInfos) { + pit.toXContent(builder, params); + } + builder.endArray(); + if (!failures().isEmpty()) { + builder.startArray("failures"); + for (FailedNodeException e : failures()) { + e.toXContent(builder, params); + } + } + builder.endObject(); + return builder; + } + + @Override + public List readNodesFrom(StreamInput in) throws IOException { + return in.readList(GetAllPitNodeResponse::new); + } + + @Override + public void writeNodesTo(StreamOutput out, List nodes) throws IOException { + out.writeList(nodes); + } + + public List getPitInfos() { + return Collections.unmodifiableList(new ArrayList<>(pitInfos)); + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_all_pits_response", + true, + (Object[] parsedObjects) -> { + @SuppressWarnings("unchecked") + List listPitInfos = (List) parsedObjects[0]; + List failures = null; + if (parsedObjects.length > 1) { + failures = (List) parsedObjects[1]; + } + if (failures == null) { + failures = new ArrayList<>(); + } + return new GetAllPitNodesResponse(listPitInfos, new ClusterName(""), new ArrayList<>(), failures); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), ListPitInfo.PARSER, new ParseField("pits")); + } + + public static GetAllPitNodesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java new file mode 100644 index 0000000000000..8fe901add5e3a --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/search/GetAllPitsAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for retrieving all PIT reader contexts from nodes + */ +public class GetAllPitsAction extends ActionType { + public static final GetAllPitsAction INSTANCE = new GetAllPitsAction(); + public static final String NAME = "indices:data/read/point_in_time/readall"; + + private GetAllPitsAction() { + super(NAME, GetAllPitNodesResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/ListPitInfo.java b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java new file mode 100644 index 0000000000000..249b0a9ab3baa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/ListPitInfo.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This holds information about pit reader context such as pit id and creation time + */ +public class ListPitInfo implements ToXContentFragment, Writeable { + private final String pitId; + private final long creationTime; + private final long keepAlive; + + public ListPitInfo(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.creationTime = creationTime; + this.keepAlive = keepAlive; + } + + public ListPitInfo(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.creationTime = in.readLong(); + this.keepAlive = in.readLong(); + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "list_pit_info", + true, + args -> new ListPitInfo((String) args[0], (long) args[1], (long) args[2]) + ); + + private static final ParseField CREATION_TIME = new ParseField("creation_time"); + private static final ParseField PIT_ID = new ParseField("pit_id"); + private static final ParseField KEEP_ALIVE = new ParseField("keep_alive"); + static { + PARSER.declareString(constructorArg(), PIT_ID); + PARSER.declareLong(constructorArg(), CREATION_TIME); + PARSER.declareLong(constructorArg(), 
KEEP_ALIVE); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(PIT_ID.getPreferredName(), pitId); + builder.field(CREATION_TIME.getPreferredName(), creationTime); + builder.field(KEEP_ALIVE.getPreferredName(), keepAlive); + builder.endObject(); + return builder; + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java new file mode 100644 index 0000000000000..577a559beb8f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Pit ID along with Id for a search context per node. 
+ * + * @opensearch.internal + */ +public class PitSearchContextIdForNode implements Writeable { + + private final String pitId; + private final SearchContextIdForNode searchContextIdForNode; + + public PitSearchContextIdForNode(String pitId, SearchContextIdForNode searchContextIdForNode) { + this.pitId = pitId; + this.searchContextIdForNode = searchContextIdForNode; + } + + PitSearchContextIdForNode(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.searchContextIdForNode = new SearchContextIdForNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + searchContextIdForNode.writeTo(out); + } + + public String getPitId() { + return pitId; + } + + public SearchContextIdForNode getSearchContextIdForNode() { + return searchContextIdForNode; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java new file mode 100644 index 0000000000000..f42d84477f9a3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +/** + * Service class for PIT reusable functions + */ +public class PitService { + + private static final Logger logger = LogManager.getLogger(PitService.class); + + private final ClusterService clusterService; + private final SearchTransportService searchTransportService; + private final TransportService transportService; + private final NodeClient nodeClient; + + @Inject + public PitService( + ClusterService clusterService, + SearchTransportService searchTransportService, + TransportService transportService, + NodeClient nodeClient + ) { + this.clusterService = clusterService; + this.searchTransportService = searchTransportService; + this.transportService = transportService; + this.nodeClient = nodeClient; + } + + /** + * Delete list of pit 
contexts. Returns the details of success of operation per PIT ID. + */ + public void deletePitContexts( + Map> nodeToContextsMap, + ActionListener listener + ) { + if (nodeToContextsMap.size() == 0) { + listener.onResponse(new DeletePitResponse(Collections.emptyList())); + } + final Set clusters = nodeToContextsMap.values() + .stream() + .flatMap(Collection::stream) + .filter(ctx -> Strings.isEmpty(ctx.getSearchContextIdForNode().getClusterAlias()) == false) + .map(c -> c.getSearchContextIdForNode().getClusterAlias()) + .collect(Collectors.toSet()); + StepListener> lookupListener = (StepListener< + BiFunction>) SearchUtils.getConnectionLookupListener( + searchTransportService.getRemoteClusterService(), + clusterService.state(), + clusters + ); + lookupListener.whenComplete(nodeLookup -> { + final GroupedActionListener groupedListener = getDeletePitGroupedListener( + listener, + nodeToContextsMap.size() + ); + + for (Map.Entry> entry : nodeToContextsMap.entrySet()) { + String clusterAlias = entry.getValue().get(0).getSearchContextIdForNode().getClusterAlias(); + DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getSearchContextIdForNode().getNode()); + if (node == null) { + node = this.clusterService.state().getNodes().get(entry.getValue().get(0).getSearchContextIdForNode().getNode()); + } + if (node == null) { + logger.error( + () -> new ParameterizedMessage("node [{}] not found", entry.getValue().get(0).getSearchContextIdForNode().getNode()) + ); + List deletePitInfos = new ArrayList<>(); + for (PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); + } else { + try { + final Transport.Connection connection = searchTransportService.getConnection(clusterAlias, node); + searchTransportService.sendFreePITContexts(connection, entry.getValue(), groupedListener); + } 
catch (Exception e) { + String nodeName = node.getName(); + logger.error(() -> new ParameterizedMessage("Delete PITs failed on node [{}]", nodeName), e); + List deletePitInfos = new ArrayList<>(); + for (PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); + } + } + } + }, listener::onFailure); + } + + public GroupedActionListener getDeletePitGroupedListener(ActionListener listener, int size) { + return new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + Map pitIdToSucceededMap = new HashMap<>(); + for (DeletePitResponse response : responses) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + if (!pitIdToSucceededMap.containsKey(deletePitInfo.getPitId())) { + pitIdToSucceededMap.put(deletePitInfo.getPitId(), deletePitInfo.isSuccessful()); + } + if (!deletePitInfo.isSuccessful()) { + logger.debug(() -> new ParameterizedMessage("Deleting PIT with ID {} failed ", deletePitInfo.getPitId())); + pitIdToSucceededMap.put(deletePitInfo.getPitId(), deletePitInfo.isSuccessful()); + } + } + } + List deletePitResults = new ArrayList<>(); + for (Map.Entry entry : pitIdToSucceededMap.entrySet()) { + deletePitResults.add(new DeletePitInfo(entry.getValue(), entry.getKey())); + } + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitResults); + listener.onResponse(deletePitResponse); + } + + @Override + public void onFailure(final Exception e) { + logger.error("Delete PITs failed", e); + listener.onFailure(e); + } + }, size); + } + + /** + * This method returns indices associated for each pit + */ + public Map getIndicesForPits(List pitIds) { + Map pitToIndicesMap = new HashMap<>(); + for (String pitId : pitIds) { + pitToIndicesMap.put(pitId, 
SearchContextId.decode(nodeClient.getNamedWriteableRegistry(), pitId).getActualIndices()); + } + return pitToIndicesMap; + } + + /** + * Get all active point in time contexts + */ + public void getAllPits(ActionListener getAllPitsListener) { + final List nodes = new ArrayList<>(); + for (ObjectCursor cursor : clusterService.state().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); + GetAllPitNodesRequest getAllPitNodesRequest = new GetAllPitNodesRequest(disNodesArr); + transportService.sendRequest( + transportService.getLocalNode(), + GetAllPitsAction.NAME, + getAllPitNodesRequest, + new TransportResponseHandler() { + + @Override + public void handleResponse(GetAllPitNodesResponse response) { + getAllPitsListener.onResponse(response); + } + + @Override + public void handleException(TransportException exp) { + getAllPitsListener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetAllPitNodesResponse read(StreamInput in) throws IOException { + return new GetAllPitNodesResponse(in); + } + } + ); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextId.java b/server/src/main/java/org/opensearch/action/search/SearchContextId.java index c2bb46a7b0e57..8a9cf1dc9772d 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextId.java @@ -116,7 +116,7 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } return new SearchContextId(Collections.unmodifiableMap(shards), Collections.unmodifiableMap(aliasFilters)); } catch (IOException e) { - throw new IllegalArgumentException(e); + throw new IllegalArgumentException("invalid id: [" + id + "]", e); } } diff --git 
a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 8f16a6e3ee226..7f218a3b1a17e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -50,7 +50,7 @@ public final class SearchContextIdForNode implements Writeable { private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + public SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index f91276960397a..241b3de72a258 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -71,7 +71,9 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.BiFunction; @@ -87,6 +89,8 @@ public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; + public static final String FREE_PIT_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context/pit]"; + public static 
final String FREE_ALL_PIT_CONTEXTS_ACTION_NAME = "indices:data/read/search[free_pit_contexts]"; public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; @@ -95,6 +99,8 @@ public class SearchTransportService { public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]"; + public static final String CREATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[create_context]"; + public static final String UPDATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[update_context]"; private final TransportService transportService; private final BiFunction responseWrapper; @@ -142,6 +148,36 @@ public void sendFreeContext( ); } + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener actionListener + ) { + transportService.sendRequest( + connection, + UPDATE_READER_CONTEXT_ACTION_NAME, + request, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(actionListener, UpdatePitContextResponse::new) + ); + } + + public void createPitContext( + Transport.Connection connection, + TransportCreatePitAction.CreateReaderContextRequest request, + SearchTask task, + ActionListener actionListener + ) { + transportService.sendChildRequest( + connection, + CREATE_READER_CONTEXT_ACTION_NAME, + request, + task, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(actionListener, TransportCreatePitAction.CreateReaderContextResponse::new) + ); + } + public void sendCanMatch( Transport.Connection connection, final ShardSearchRequest request, @@ -168,6 +204,20 
@@ public void sendClearAllScrollContexts(Transport.Connection connection, final Ac ); } + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + transportService.sendRequest( + connection, + FREE_PIT_CONTEXT_ACTION_NAME, + new PitFreeContextsRequest(contextIds), + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) + ); + } + public void sendExecuteDfs( Transport.Connection connection, final ShardSearchRequest request, @@ -338,6 +388,43 @@ public ShardSearchContextId id() { } + /** + * Request to free the PIT context based on id + */ + static class PitFreeContextsRequest extends TransportRequest { + private List contextIds; + + PitFreeContextsRequest(List contextIds) { + this.contextIds = new ArrayList<>(); + this.contextIds.addAll(contextIds); + } + + PitFreeContextsRequest(StreamInput in) throws IOException { + super(in); + int size = in.readVInt(); + if (size > 0) { + this.contextIds = new ArrayList<>(); + for (int i = 0; i < size; i++) { + PitSearchContextIdForNode contextId = new PitSearchContextIdForNode(in); + contextIds.add(contextId); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(contextIds.size()); + for (PitSearchContextIdForNode contextId : contextIds) { + contextId.writeTo(out); + } + } + + public List getContextIds() { + return this.contextIds; + } + } + /** * A search free context request * @@ -422,6 +509,15 @@ public static void registerRequestHandler(TransportService transportService, Sea } ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new); + + transportService.registerRequestHandler( + FREE_PIT_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + PitFreeContextsRequest::new, + (request, channel, task) -> { 
channel.sendResponse(searchService.freeReaderContextsIfFound(request.getContextIds())); } + ); + TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); + transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, @@ -562,6 +658,48 @@ public static void registerRequestHandler(TransportService transportService, Sea } ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME, SearchService.CanMatchResponse::new); + transportService.registerRequestHandler( + CREATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + TransportCreatePitAction.CreateReaderContextRequest::new, + (request, channel, task) -> { + ChannelActionListener< + TransportCreatePitAction.CreateReaderContextResponse, + TransportCreatePitAction.CreateReaderContextRequest> listener = new ChannelActionListener<>( + channel, + CREATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.createPitReaderContext( + request.getShardId(), + request.getKeepAlive(), + ActionListener.wrap( + r -> listener.onResponse(new TransportCreatePitAction.CreateReaderContextResponse(r)), + listener::onFailure + ) + ); + } + ); + TransportActionProxy.registerProxyAction( + transportService, + CREATE_READER_CONTEXT_ACTION_NAME, + TransportCreatePitAction.CreateReaderContextResponse::new + ); + + transportService.registerRequestHandler( + UPDATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + UpdatePitContextRequest::new, + (request, channel, task) -> { + ChannelActionListener listener = new ChannelActionListener<>( + channel, + UPDATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.updatePitIdAndKeepAlive(request, listener); + } + ); + TransportActionProxy.registerProxyAction(transportService, UPDATE_READER_CONTEXT_ACTION_NAME, UpdatePitContextResponse::new); } /** diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java 
b/server/src/main/java/org/opensearch/action/search/SearchUtils.java new file mode 100644 index 0000000000000..96fcda0d491c9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.transport.RemoteClusterService; + +import java.util.Set; +import java.util.function.BiFunction; + +/** + * Helper class for common search functions + */ +public class SearchUtils { + + public SearchUtils() {} + + /** + * Get connection lookup listener for list of clusters passed + */ + public static ActionListener> getConnectionLookupListener( + RemoteClusterService remoteClusterService, + ClusterState state, + Set clusters + ) { + final StepListener> lookupListener = new StepListener<>(); + + if (clusters.isEmpty()) { + lookupListener.onResponse((cluster, nodeId) -> state.getNodes().get(nodeId)); + } else { + remoteClusterService.collectNodes(clusters, lookupListener); + } + return lookupListener; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java new file mode 100644 index 0000000000000..c6bf610edfb9a --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Transport action for creating PIT reader context + */ +public class TransportCreatePitAction extends HandledTransportAction { + + public static final String CREATE_PIT_ACTION = "create_pit"; + private final TransportService transportService; + private final SearchTransportService searchTransportService; + private final ClusterService clusterService; + private final TransportSearchAction transportSearchAction; + private final NamedWriteableRegistry namedWriteableRegistry; + private final CreatePitController createPitController; + + @Inject + public TransportCreatePitAction( + TransportService transportService, + ActionFilters actionFilters, + SearchTransportService searchTransportService, + ClusterService clusterService, + TransportSearchAction transportSearchAction, + NamedWriteableRegistry namedWriteableRegistry, + CreatePitController createPitController + ) { + super(CreatePitAction.NAME, transportService, actionFilters, in -> new CreatePitRequest(in)); + this.transportService = 
transportService; + this.searchTransportService = searchTransportService; + this.clusterService = clusterService; + this.transportSearchAction = transportSearchAction; + this.namedWriteableRegistry = namedWriteableRegistry; + this.createPitController = createPitController; + } + + @Override + protected void doExecute(Task task, CreatePitRequest request, ActionListener listener) { + final StepListener createPitListener = new StepListener<>(); + final ActionListener updatePitIdListener = ActionListener.wrap(r -> listener.onResponse(r), e -> { + logger.error( + () -> new ParameterizedMessage( + "PIT creation failed while updating PIT ID for indices [{}]", + Arrays.toString(request.indices()) + ) + ); + listener.onFailure(e); + }); + createPitController.executeCreatePit(request, task, createPitListener, updatePitIdListener); + } + + /** + * Request to create pit reader context with keep alive + */ + public static class CreateReaderContextRequest extends TransportRequest { + private final ShardId shardId; + private final TimeValue keepAlive; + + public CreateReaderContextRequest(ShardId shardId, TimeValue keepAlive) { + this.shardId = shardId; + this.keepAlive = keepAlive; + } + + public ShardId getShardId() { + return shardId; + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + public CreateReaderContextRequest(StreamInput in) throws IOException { + super(in); + this.shardId = new ShardId(in); + this.keepAlive = in.readTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardId.writeTo(out); + out.writeTimeValue(keepAlive); + } + } + + /** + * Create pit reader context response which holds the contextId + */ + public static class CreateReaderContextResponse extends SearchPhaseResult { + public CreateReaderContextResponse(ShardSearchContextId shardSearchContextId) { + this.contextId = shardSearchContextId; + } + + public CreateReaderContextResponse(StreamInput in) throws IOException { + 
super(in); + contextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + contextId.writeTo(out); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java new file mode 100644 index 0000000000000..b85fe302a748f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.tasks.Task; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Transport action for deleting point in time searches - supports deleting list and all point in time searches + */ +public class TransportDeletePitAction extends HandledTransportAction { + private final NamedWriteableRegistry namedWriteableRegistry; + private final PitService pitService; + + @Inject + public TransportDeletePitAction( + TransportService transportService, + ActionFilters actionFilters, + NamedWriteableRegistry namedWriteableRegistry, + PitService pitService + ) { + super(DeletePitAction.NAME, transportService, actionFilters, DeletePitRequest::new); + this.namedWriteableRegistry = namedWriteableRegistry; + this.pitService = pitService; + } + + /** + * Invoke 'delete all 
pits' or 'delete list of pits' workflow based on request + */ + @Override + protected void doExecute(Task task, DeletePitRequest request, ActionListener<DeletePitResponse> listener) { + List<String> pitIds = request.getPitIds(); + if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + deleteAllPits(listener); + } else { + deletePits(listener, request); + } + } + + /** + * Deletes one or more point in time search contexts. + */ + private void deletePits(ActionListener<DeletePitResponse> listener, DeletePitRequest request) { + Map<String, List<PitSearchContextIdForNode>> nodeToContextsMap = new HashMap<>(); + for (String pitId : request.getPitIds()) { + SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, pitId); + for (SearchContextIdForNode contextIdForNode : contextId.shards().values()) { + PitSearchContextIdForNode pitSearchContext = new PitSearchContextIdForNode(pitId, contextIdForNode); + List<PitSearchContextIdForNode> contexts = nodeToContextsMap.getOrDefault(contextIdForNode.getNode(), new ArrayList<>()); + contexts.add(pitSearchContext); + nodeToContextsMap.put(contextIdForNode.getNode(), contexts); + } + } + pitService.deletePitContexts(nodeToContextsMap, listener); + } + + /** + * Delete all active PIT reader contexts leveraging list all PITs + * + * For Cross cluster PITs: + * - mixed cluster PITs (PIT comprising local and remote) will be fully deleted. Since there will at least be + * one reader context with PIT ID present in local cluster, 'Get all PITs' will retrieve the PIT ID with which + * we can completely delete the PIT contexts in both local and remote cluster. + * - fully remote PITs will not be deleted as 'Get all PITs' operates on local cluster only and no PIT info can + * be retrieved when it's fully remote. + */ + private void deleteAllPits(ActionListener<DeletePitResponse> listener) { + // Get all PITs and execute delete operation for the PITs.
+ pitService.getAllPits(ActionListener.wrap(getAllPitNodesResponse -> { + DeletePitRequest deletePitRequest = new DeletePitRequest( + getAllPitNodesResponse.getPitInfos().stream().map(r -> r.getPitId()).collect(Collectors.toList()) + ); + deletePits(listener, deletePitRequest); + }, listener::onFailure)); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java new file mode 100644 index 0000000000000..39299f9a33b18 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportGetAllPitsAction.java @@ -0,0 +1,86 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.FailedNodeException; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.nodes.TransportNodesAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.search.SearchService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +/** + * Transport action to get all active PIT contexts across all nodes + */ +public class TransportGetAllPitsAction extends TransportNodesAction< + GetAllPitNodesRequest, + GetAllPitNodesResponse, + GetAllPitNodeRequest, + GetAllPitNodeResponse> { + private final SearchService searchService; + + @Inject + public TransportGetAllPitsAction( + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + SearchService searchService + ) { + super( + GetAllPitsAction.NAME, + threadPool, + 
clusterService, + transportService, + actionFilters, + GetAllPitNodesRequest::new, + GetAllPitNodeRequest::new, + ThreadPool.Names.SAME, + GetAllPitNodeResponse.class + ); + this.searchService = searchService; + } + + @Override + protected GetAllPitNodesResponse newResponse( + GetAllPitNodesRequest request, + List getAllPitNodeResponses, + List failures + ) { + return new GetAllPitNodesResponse(clusterService.getClusterName(), getAllPitNodeResponses, failures); + } + + @Override + protected GetAllPitNodeRequest newNodeRequest(GetAllPitNodesRequest request) { + return new GetAllPitNodeRequest(); + } + + @Override + protected GetAllPitNodeResponse newNodeResponse(StreamInput in) throws IOException { + return new GetAllPitNodeResponse(in); + } + + /** + * This retrieves all active PITs in the node + */ + @Override + protected GetAllPitNodeResponse nodeOperation(GetAllPitNodeRequest request) { + GetAllPitNodeResponse nodeResponse = new GetAllPitNodeResponse( + transportService.getLocalNode(), + searchService.getAllPITReaderContexts() + ); + return nodeResponse; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index ebb0f21d6fe16..1ca477942cdf6 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -65,6 +65,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.index.Index; import org.opensearch.index.query.Rewriteable; @@ -297,6 +298,81 @@ void executeOnShardTarget( ); } + public void executeRequest( + Task task, + SearchRequest searchRequest, + String actionName, + boolean 
includeSearchContext, + SinglePhaseSearchAction phaseSearchAction, + ActionListener listener + ) { + executeRequest(task, searchRequest, new SearchAsyncActionProvider() { + @Override + public AbstractSearchAsyncAction asyncSearchAction( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardsIts, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + SearchResponse.Clusters clusters + ) { + return new AbstractSearchAsyncAction( + actionName, + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + indexRoutings, + executor, + searchRequest, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + searchRequest.getMaxConcurrentShardRequests(), + clusters + ) { + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase(getName()) { + @Override + public void run() { + final AtomicArray atomicArray = results.getAtomicArray(); + sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + } + }; + } + + @Override + boolean buildPointInTimeFromSearchResults() { + return includeSearchContext; + } + }; + } + }, listener); + } + private void executeRequest( Task task, SearchRequest searchRequest, diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java 
b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java new file mode 100644 index 0000000000000..e6c9befb7938f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Request used to update PIT reader contexts with pitId, keepAlive and creationTime + */ +public class UpdatePitContextRequest extends TransportRequest { + private final String pitId; + private final long keepAlive; + + private final long creationTime; + private final ShardSearchContextId searchContextId; + + public UpdatePitContextRequest(ShardSearchContextId searchContextId, String pitId, long keepAlive, long creationTime) { + this.pitId = pitId; + this.searchContextId = searchContextId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + UpdatePitContextRequest(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + keepAlive = in.readLong(); + creationTime = in.readLong(); + searchContextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(pitId); + out.writeLong(keepAlive); + out.writeLong(creationTime); + searchContextId.writeTo(out); + } + + public ShardSearchContextId getSearchContextId() { + return searchContextId; + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + public long getKeepAlive() 
{ + return keepAlive; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java new file mode 100644 index 0000000000000..919dd87ea3041 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +/** + * Update PIT context response with creation time, keep alive etc. + */ +public class UpdatePitContextResponse extends TransportResponse { + private final String pitId; + + private final long creationTime; + + private final long keepAlive; + + UpdatePitContextResponse(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + creationTime = in.readLong(); + keepAlive = in.readLong(); + } + + public UpdatePitContextResponse(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } + + public String getPitId() { + return pitId; + } + + public long getKeepAlive() { + return keepAlive; + } + + public long getCreationTime() { + return creationTime; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index 
f849be4db4e2b..9e353a35831d0 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -532,6 +532,13 @@ private void onShardOperation( } } + /** + * This method reads ShardRouting from input stream + */ + public List getShardRoutingsFromInputStream(StreamInput in) throws IOException { + return in.readList(ShardRouting::new); + } + /** * A node request * @@ -547,7 +554,7 @@ public class NodeRequest extends TransportRequest implements IndicesRequest { public NodeRequest(StreamInput in) throws IOException { super(in); indicesLevelRequest = readRequestFrom(in); - shards = in.readList(ShardRouting::new); + shards = getShardRoutingsFromInputStream(in); nodeId = in.readString(); } diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 50f8f52253815..f20f0b4246cb6 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -34,6 +34,8 @@ import org.opensearch.action.ActionFuture; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; import org.opensearch.action.bulk.BulkRequest; import org.opensearch.action.bulk.BulkRequestBuilder; import org.opensearch.action.bulk.BulkResponse; @@ -58,6 +60,12 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import 
org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -325,6 +333,26 @@ public interface Client extends OpenSearchClient, Releasable { */ SearchScrollRequestBuilder prepareSearchScroll(String scrollId); + /** + * Create point in time for one or more indices + */ + void createPit(CreatePitRequest createPITRequest, ActionListener listener); + + /** + * Delete one or more point in time contexts + */ + void deletePits(DeletePitRequest deletePITRequest, ActionListener listener); + + /** + * Get all active point in time searches + */ + void getAllPits(GetAllPitNodesRequest getAllPitNodesRequest, ActionListener listener); + + /** + * Get information of segments of one or more PITs + */ + void pitSegments(PitSegmentsRequest pitSegmentsRequest, ActionListener listener); + /** * Performs multiple search requests. 
*/ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 02a840f64ada4..21cd01bf65a45 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -240,6 +240,8 @@ import org.opensearch.action.admin.indices.segments.IndicesSegmentsAction; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequestBuilder; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsAction; import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsRequestBuilder; @@ -327,10 +329,19 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; +import org.opensearch.action.search.GetAllPitsAction; import org.opensearch.action.search.SearchAction; import 
org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchRequestBuilder; @@ -577,6 +588,26 @@ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); } + @Override + public void createPit(final CreatePitRequest createPITRequest, final ActionListener listener) { + execute(CreatePitAction.INSTANCE, createPITRequest, listener); + } + + @Override + public void deletePits(final DeletePitRequest deletePITRequest, final ActionListener listener) { + execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + + @Override + public void getAllPits(final GetAllPitNodesRequest getAllPitNodesRequest, final ActionListener listener) { + execute(GetAllPitsAction.INSTANCE, getAllPitNodesRequest, listener); + } + + @Override + public void pitSegments(final PitSegmentsRequest request, final ActionListener listener) { + execute(PitSegmentsAction.INSTANCE, request, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java index 7dec8f9c84a89..e3aa2a666d454 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/ShardRouting.java @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public final class ShardRouting implements Writeable, ToXContentObject { +public class ShardRouting implements Writeable, ToXContentObject { /** * Used if shard size is not available @@ -78,7 +78,7 @@ public final class ShardRouting implements Writeable, ToXContentObject { * A constructor to internally create shard routing instances, note, the internal flag should only be set to true * by either this class or tests. Visible for testing. 
*/ - ShardRouting( + protected ShardRouting( ShardId shardId, String currentNodeId, String relocatingNodeId, diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 8325f4fb289f6..42c4196b29f37 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -34,6 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.opensearch.action.main.TransportMainAction; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.action.search.CreatePitController; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -472,6 +473,9 @@ public void apply(Settings value, Settings current, Settings previous) { MultiBucketConsumerService.MAX_BUCKET_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, SearchService.MAX_OPEN_SCROLL_CONTEXT, + SearchService.MAX_OPEN_PIT_CONTEXT, + SearchService.MAX_PIT_KEEPALIVE_SETTING, + CreatePitController.PIT_INIT_KEEP_ALIVE, Node.WRITE_PORTS_FILE_SETTING, Node.NODE_NAME_SETTING, Node.NODE_ATTRIBUTES, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index be5d9feec9e8d..dc28fcaf2596f 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -150,6 +150,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_CHECK_ON_STARTUP, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, IndexSettings.MAX_SLICES_PER_SCROLL, + IndexSettings.MAX_SLICES_PER_PIT, IndexSettings.MAX_REGEX_LENGTH_SETTING, 
ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 8b0a6278c2176..74b104392133f 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -457,6 +457,17 @@ public final class IndexSettings { Property.IndexScope ); + /** + * The maximum number of slices allowed in a search request with PIT + */ + public static final Setting MAX_SLICES_PER_PIT = Setting.intSetting( + "index.max_slices_per_pit", + 1024, + 1, + Property.Dynamic, + Property.IndexScope + ); + /** * The maximum length of regex string allowed in a regexp query. */ @@ -618,7 +629,10 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { * The maximum number of slices allowed in a scroll request. */ private volatile int maxSlicesPerScroll; - + /** + * The maximum number of slices allowed in a PIT request. + */ + private volatile int maxSlicesPerPit; /** * The maximum length of regex string allowed in a regexp query. 
*/ @@ -737,6 +751,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); + maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); @@ -810,6 +825,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); + scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); @@ -1289,6 +1305,17 @@ private void setMaxSlicesPerScroll(int value) { this.maxSlicesPerScroll = value; } + /** + * The maximum number of slices allowed in a PIT request. + */ + public int getMaxSlicesPerPit() { + return maxSlicesPerPit; + } + + private void setMaxSlicesPerPit(int value) { + this.maxSlicesPerPit = value; + } + /** * The maximum length of regex string allowed in a regexp query. 
*/ diff --git a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java index fe23000902608..012c94639d526 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/SearchStats.java @@ -32,6 +32,7 @@ package org.opensearch.index.search.stats; +import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -77,6 +78,10 @@ public static class Stats implements Writeable, ToXContentFragment { private long suggestTimeInMillis; private long suggestCurrent; + private long pitCount; + private long pitTimeInMillis; + private long pitCurrent; + private Stats() { // for internal use, initializes all counts to 0 } @@ -91,6 +96,9 @@ public Stats( long scrollCount, long scrollTimeInMillis, long scrollCurrent, + long pitCount, + long pitTimeInMillis, + long pitCurrent, long suggestCount, long suggestTimeInMillis, long suggestCurrent @@ -110,6 +118,10 @@ public Stats( this.suggestCount = suggestCount; this.suggestTimeInMillis = suggestTimeInMillis; this.suggestCurrent = suggestCurrent; + + this.pitCount = pitCount; + this.pitTimeInMillis = pitTimeInMillis; + this.pitCurrent = pitCurrent; } private Stats(StreamInput in) throws IOException { @@ -128,6 +140,12 @@ private Stats(StreamInput in) throws IOException { suggestCount = in.readVLong(); suggestTimeInMillis = in.readVLong(); suggestCurrent = in.readVLong(); + + if (in.getVersion().onOrAfter(Version.V_2_4_0)) { + pitCount = in.readVLong(); + pitTimeInMillis = in.readVLong(); + pitCurrent = in.readVLong(); + } } public void add(Stats stats) { @@ -146,6 +164,10 @@ public void add(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; suggestCurrent += stats.suggestCurrent; + + pitCount += stats.pitCount; + 
pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public void addForClosingShard(Stats stats) { @@ -162,6 +184,10 @@ public void addForClosingShard(Stats stats) { suggestCount += stats.suggestCount; suggestTimeInMillis += stats.suggestTimeInMillis; + + pitCount += stats.pitCount; + pitTimeInMillis += stats.pitTimeInMillis; + pitCurrent += stats.pitCurrent; } public long getQueryCount() { @@ -212,6 +238,22 @@ public long getScrollCurrent() { return scrollCurrent; } + public long getPitCount() { + return pitCount; + } + + public TimeValue getPitTime() { + return new TimeValue(pitTimeInMillis); + } + + public long getPitTimeInMillis() { + return pitTimeInMillis; + } + + public long getPitCurrent() { + return pitCurrent; + } + public long getSuggestCount() { return suggestCount; } @@ -249,6 +291,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(suggestCount); out.writeVLong(suggestTimeInMillis); out.writeVLong(suggestCurrent); + + if (out.getVersion().onOrAfter(Version.V_2_4_0)) { + out.writeVLong(pitCount); + out.writeVLong(pitTimeInMillis); + out.writeVLong(pitCurrent); + } } @Override @@ -265,6 +313,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.humanReadableField(Fields.SCROLL_TIME_IN_MILLIS, Fields.SCROLL_TIME, getScrollTime()); builder.field(Fields.SCROLL_CURRENT, scrollCurrent); + builder.field(Fields.PIT_TOTAL, pitCount); + builder.humanReadableField(Fields.PIT_TIME_IN_MILLIS, Fields.PIT_TIME, getPitTime()); + builder.field(Fields.PIT_CURRENT, pitCurrent); + builder.field(Fields.SUGGEST_TOTAL, suggestCount); builder.humanReadableField(Fields.SUGGEST_TIME_IN_MILLIS, Fields.SUGGEST_TIME, getSuggestTime()); builder.field(Fields.SUGGEST_CURRENT, suggestCurrent); @@ -385,6 +437,10 @@ static final class Fields { static final String SCROLL_TIME = "scroll_time"; static final String SCROLL_TIME_IN_MILLIS = "scroll_time_in_millis"; static final String SCROLL_CURRENT 
= "scroll_current"; + static final String PIT_TOTAL = "point_in_time_total"; + static final String PIT_TIME = "point_in_time_time"; + static final String PIT_TIME_IN_MILLIS = "point_in_time_time_in_millis"; + static final String PIT_CURRENT = "point_in_time_current"; static final String SUGGEST_TOTAL = "suggest_total"; static final String SUGGEST_TIME = "suggest_time"; static final String SUGGEST_TIME_IN_MILLIS = "suggest_time_in_millis"; diff --git a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java index 3ef3571c75e59..6d0eb3a5949ca 100644 --- a/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java +++ b/server/src/main/java/org/opensearch/index/search/stats/ShardSearchStats.java @@ -187,6 +187,18 @@ public void onFreeScrollContext(ReaderContext readerContext) { totalStats.scrollMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); } + @Override + public void onNewPitContext(ReaderContext readerContext) { + totalStats.pitCurrent.inc(); + } + + @Override + public void onFreePitContext(ReaderContext readerContext) { + totalStats.pitCurrent.dec(); + assert totalStats.pitCurrent.count() >= 0; + totalStats.pitMetric.inc(TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - readerContext.getStartTimeInNano())); + } + /** * Holder of statistics values * @@ -203,10 +215,12 @@ static final class StatsHolder { * for one-thousand times as long (i.e., scrolls that execute for almost twelve days on average). 
*/ final MeanMetric scrollMetric = new MeanMetric(); + final MeanMetric pitMetric = new MeanMetric(); final MeanMetric suggestMetric = new MeanMetric(); final CounterMetric queryCurrent = new CounterMetric(); final CounterMetric fetchCurrent = new CounterMetric(); final CounterMetric scrollCurrent = new CounterMetric(); + final CounterMetric pitCurrent = new CounterMetric(); final CounterMetric suggestCurrent = new CounterMetric(); SearchStats.Stats stats() { @@ -220,6 +234,9 @@ SearchStats.Stats stats() { scrollMetric.count(), TimeUnit.MICROSECONDS.toMillis(scrollMetric.sum()), scrollCurrent.count(), + pitMetric.count(), + TimeUnit.MICROSECONDS.toMillis(pitMetric.sum()), + pitCurrent.count(), suggestMetric.count(), TimeUnit.NANOSECONDS.toMillis(suggestMetric.sum()), suggestCurrent.count() diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index d3177055a5bd8..0a7c80f5e87d3 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -131,6 +131,19 @@ default void onFreeScrollContext(ReaderContext readerContext) {} */ default void validateReaderContext(ReaderContext readerContext, TransportRequest transportRequest) {} + /** + * Executed when a new Point-In-Time {@link ReaderContext} was created + * @param readerContext the created reader context + */ + default void onNewPitContext(ReaderContext readerContext) {} + + /** + * Executed when a Point-In-Time search {@link ReaderContext} is freed. + * This happens on deletion of a Point-In-Time or when its keep-alive expires. + * @param readerContext the freed reader context + */ + default void onFreePitContext(ReaderContext readerContext) {} + + /** + * A Composite listener that multiplexes calls to each of the listeners methods. 
*/ @@ -265,5 +278,36 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest } ExceptionsHelper.reThrowIfNotNull(exception); } + + /** + * Executed when a new Point-In-Time {@link ReaderContext} was created + * @param readerContext the created reader context + */ + @Override + public void onNewPitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onNewPitContext(readerContext); + } catch (Exception e) { + logger.warn("onNewPitContext listener failed", e); + } + } + } + + /** + * Executed when a Point-In-Time search {@link ReaderContext} is freed. + * This happens on deletion of a Point-In-Time or when its keep-alive expires. + * @param readerContext the freed reader context + */ + @Override + public void onFreePitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onFreePitContext(readerContext); + } catch (Exception e) { + logger.warn("onFreePitContext listener failed", e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 0131f6c8808f0..f5440a6cb1c06 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -590,6 +590,24 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("pri.search.scroll_total", "default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "sibling:pri;alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell("pri.search.point_in_time_current", "default:false;text-align:right;desc:open point in time contexts"); + + table.addCell( + "search.point_in_time_time", + 
"sibling:pri;alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell("pri.search.point_in_time_time", "default:false;text-align:right;desc:time point in time contexts held open"); + + table.addCell( + "search.point_in_time_total", + "sibling:pri;alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("pri.search.point_in_time_total", "default:false;text-align:right;desc:completed point in time contexts"); + table.addCell("segments.count", "sibling:pri;alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("pri.segments.count", "default:false;text-align:right;desc:number of segments"); @@ -871,6 +889,15 @@ Table buildTable( table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getScrollCount()); table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getScrollCount()); + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCurrent()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCurrent()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitTime()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitTime()); + + table.addCell(totalStats.getSearch() == null ? null : totalStats.getSearch().getTotal().getPitCount()); + table.addCell(primaryStats.getSearch() == null ? null : primaryStats.getSearch().getTotal().getPitCount()); + table.addCell(totalStats.getSegments() == null ? null : totalStats.getSegments().getCount()); table.addCell(primaryStats.getSegments() == null ? 
null : primaryStats.getSegments().getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index f39f96ece180a..4200e3d768e10 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -310,6 +310,19 @@ protected Table getTableWithHeader(final RestRequest request) { ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:scc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:scti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:scto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); + table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by segments"); table.addCell( @@ -519,6 +532,9 @@ Table buildTable( table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCurrent()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollTime()); table.addCell(searchStats == null ? null : searchStats.getTotal().getScrollCount()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCurrent()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitTime()); + table.addCell(searchStats == null ? null : searchStats.getTotal().getPitCount()); SegmentsStats segmentsStats = indicesStats == null ? 
null : indicesStats.getSegments(); table.addCell(segmentsStats == null ? null : segmentsStats.getCount()); diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java new file mode 100644 index 0000000000000..ba9606e8eb444 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestPitSegmentsAction.java @@ -0,0 +1,171 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.cat; + +import org.opensearch.action.admin.indices.segments.IndexSegments; +import org.opensearch.action.admin.indices.segments.IndexShardSegments; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.action.admin.indices.segments.ShardSegments; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.Table; +import org.opensearch.index.engine.Segment; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.action.RestResponseListener; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Rest action for pit segments + */ +public class RestPitSegmentsAction extends AbstractCatAction { + private final Supplier nodesInCluster; + + public 
RestPitSegmentsAction(Supplier nodesInCluster) { + super(); + this.nodesInCluster = nodesInCluster; + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(GET, "/_cat/pit_segments/_all"), new Route(GET, "/_cat/pit_segments"))); + } + + @Override + public String getName() { + return "cat_pit_segments_action"; + } + + @Override + public boolean allowSystemIndexAccessByDefault() { + return true; + } + + @Override + protected BaseRestHandler.RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) { + String allPitIdsQualifier = "_all"; + final PitSegmentsRequest pitSegmentsRequest; + if (request.path().contains(allPitIdsQualifier)) { + pitSegmentsRequest = new PitSegmentsRequest(allPitIdsQualifier); + } else { + pitSegmentsRequest = new PitSegmentsRequest(); + try { + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + pitSegmentsRequest.fromXContent(xContentParser); + } + })); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + return channel -> client.execute(PitSegmentsAction.INSTANCE, pitSegmentsRequest, new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(final IndicesSegmentResponse indicesSegmentResponse) throws Exception { + final Map indicesSegments = indicesSegmentResponse.getIndices(); + Table tab = buildTable(request, indicesSegments); + return RestTable.buildResponse(tab, channel); + } + }); + } + + @Override + protected void documentation(StringBuilder sb) { + sb.append("/_cat/pit_segments\n"); + sb.append("/_cat/pit_segments/{pit_id}\n"); + } + + @Override + protected Table getTableWithHeader(RestRequest request) { + Table table = new Table(); + table.startHeaders(); + table.addCell("index", "default:true;alias:i,idx;desc:index name"); + table.addCell("shard", "default:true;alias:s,sh;desc:shard name"); + table.addCell("prirep", 
"alias:p,pr,primaryOrReplica;default:true;desc:primary or replica"); + table.addCell("ip", "default:true;desc:ip of node where it lives"); + table.addCell("id", "default:false;desc:unique id of node where it lives"); + table.addCell("segment", "default:true;alias:seg;desc:segment name"); + table.addCell("generation", "default:true;alias:g,gen;text-align:right;desc:segment generation"); + table.addCell("docs.count", "default:true;alias:dc,docsCount;text-align:right;desc:number of docs in segment"); + table.addCell("docs.deleted", "default:true;alias:dd,docsDeleted;text-align:right;desc:number of deleted docs in segment"); + table.addCell("size", "default:true;alias:si;text-align:right;desc:segment size in bytes"); + table.addCell("size.memory", "default:true;alias:sm,sizeMemory;text-align:right;desc:segment memory in bytes"); + table.addCell("committed", "default:true;alias:ic,isCommitted;desc:is segment committed"); + table.addCell("searchable", "default:true;alias:is,isSearchable;desc:is segment searched"); + table.addCell("version", "default:true;alias:v,ver;desc:version"); + table.addCell("compound", "default:true;alias:ico,isCompound;desc:is segment compound"); + table.endHeaders(); + return table; + } + + private Table buildTable(final RestRequest request, Map indicesSegments) { + Table table = getTableWithHeader(request); + + DiscoveryNodes nodes = this.nodesInCluster.get(); + table.startRow(); + table.addCell("index", "default:true;alias:i,idx;desc:index name"); + table.addCell("shard", "default:true;alias:s,sh;desc:shard name"); + table.addCell("prirep", "alias:p,pr,primaryOrReplica;default:true;desc:primary or replica"); + table.addCell("ip", "default:true;desc:ip of node where it lives"); + table.addCell("id", "default:false;desc:unique id of node where it lives"); + table.addCell("segment", "default:true;alias:seg;desc:segment name"); + table.addCell("generation", "default:true;alias:g,gen;text-align:right;desc:segment generation"); + 
table.addCell("docs.count", "default:true;alias:dc,docsCount;text-align:right;desc:number of docs in segment"); + table.addCell("docs.deleted", "default:true;alias:dd,docsDeleted;text-align:right;desc:number of deleted docs in segment"); + table.addCell("size", "default:true;alias:si;text-align:right;desc:segment size in bytes"); + table.addCell("size.memory", "default:true;alias:sm,sizeMemory;text-align:right;desc:segment memory in bytes"); + table.addCell("committed", "default:true;alias:ic,isCommitted;desc:is segment committed"); + table.addCell("searchable", "default:true;alias:is,isSearchable;desc:is segment searched"); + table.addCell("version", "default:true;alias:v,ver;desc:version"); + table.addCell("compound", "default:true;alias:ico,isCompound;desc:is segment compound"); + table.endRow(); + for (IndexSegments indexSegments : indicesSegments.values()) { + Map shards = indexSegments.getShards(); + for (IndexShardSegments indexShardSegments : shards.values()) { + ShardSegments[] shardSegments = indexShardSegments.getShards(); + for (ShardSegments shardSegment : shardSegments) { + List segments = shardSegment.getSegments(); + for (Segment segment : segments) { + table.startRow(); + table.addCell(shardSegment.getShardRouting().getIndexName()); + table.addCell(shardSegment.getShardRouting().getId()); + table.addCell(shardSegment.getShardRouting().primary() ? 
"p" : "r"); + table.addCell(nodes.get(shardSegment.getShardRouting().currentNodeId()).getHostAddress()); + table.addCell(shardSegment.getShardRouting().currentNodeId()); + table.addCell(segment.getName()); + table.addCell(segment.getGeneration()); + table.addCell(segment.getNumDocs()); + table.addCell(segment.getDeletedDocs()); + table.addCell(segment.getSize()); + table.addCell(0L); + table.addCell(segment.isCommitted()); + table.addCell(segment.isSearch()); + table.addCell(segment.getVersion()); + table.addCell(segment.isCompound()); + table.endRow(); + + } + } + } + } + return table; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 05f3d4cfb9041..ca6cfb6eab1f4 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -225,6 +225,18 @@ protected Table getTableWithHeader(final RestRequest request) { "alias:scti,searchScrollTime;default:false;text-align:right;desc:time scroll contexts held open" ); table.addCell("search.scroll_total", "alias:scto,searchScrollTotal;default:false;text-align:right;desc:completed scroll contexts"); + table.addCell( + "search.point_in_time_current", + "alias:spc,searchPointInTimeCurrent;default:false;text-align:right;desc:open point in time contexts" + ); + table.addCell( + "search.point_in_time_time", + "alias:spti,searchPointInTimeTime;default:false;text-align:right;desc:time point in time contexts held open" + ); + table.addCell( + "search.point_in_time_total", + "alias:spto,searchPointInTimeTotal;default:false;text-align:right;desc:completed point in time contexts" + ); table.addCell("segments.count", "alias:sc,segmentsCount;default:false;text-align:right;desc:number of segments"); table.addCell("segments.memory", "alias:sm,segmentsMemory;default:false;text-align:right;desc:memory used by 
segments"); @@ -390,6 +402,9 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCurrent())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollTime())); table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getScrollCount())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCurrent())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitTime())); + table.addCell(getOrNull(commonStats, CommonStats::getSearch, i -> i.getTotal().getPitCount())); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getCount)); table.addCell(getOrNull(commonStats, CommonStats::getSegments, SegmentsStats::getZeroMemory)); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for creating PIT context + */ +public class RestCreatePitAction extends BaseRestHandler { + public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation"; + public static String KEEP_ALIVE = "keep_alive"; + + @Override + public String getName() { + return "create_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true); + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null); + CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices); + createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions())); + createPitRequest.setPreference(request.param("preference")); + createPitRequest.setRouting(request.param("routing")); + + return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time"))); + } + +} diff --git 
a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java new file mode 100644 index 0000000000000..b19a7505741cc --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + final DeletePitRequest deletePITRequest; + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest = new DeletePitRequest(asList(allPitIdsQualifier)); + } else { + deletePITRequest = new DeletePitRequest(); + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> 
client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java new file mode 100644 index 0000000000000..0e1febe9d2a61 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java @@ -0,0 +1,90 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.RestResponse; +import org.opensearch.rest.RestStatus; +import org.opensearch.rest.action.RestBuilderListener; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Rest action for retrieving all active PIT IDs across all nodes + */ +public class RestGetAllPitsAction extends BaseRestHandler { + + private final Supplier nodesInCluster; + + public RestGetAllPitsAction(Supplier nodesInCluster) { + super(); + 
this.nodesInCluster = nodesInCluster; + } + + @Override + public String getName() { + return "get_all_pit_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + final List nodes = new ArrayList<>(); + for (DiscoveryNode node : nodesInCluster.get()) { + nodes.add(node); + } + DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); + GetAllPitNodesRequest getAllPitNodesRequest = new GetAllPitNodesRequest(disNodesArr); + return channel -> client.getAllPits(getAllPitNodesRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(final GetAllPitNodesResponse getAllPITNodesResponse, XContentBuilder builder) + throws Exception { + builder.startObject(); + if (getAllPITNodesResponse.hasFailures()) { + builder.startArray("failures"); + for (int idx = 0; idx < getAllPITNodesResponse.failures().size(); idx++) { + builder.startObject(); + builder.field( + getAllPITNodesResponse.failures().get(idx).nodeId(), + getAllPITNodesResponse.failures().get(idx).getDetailedMessage() + ); + builder.endObject(); + } + builder.endArray(); + } + builder.field("pits", getAllPITNodesResponse.getPitInfos()); + builder.endObject(); + if (getAllPITNodesResponse.hasFailures() && getAllPITNodesResponse.getPitInfos().isEmpty()) { + return new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, builder); + } + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + + @Override + public List routes() { + return unmodifiableList(Collections.singletonList(new Route(GET, "/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index d09143e3373b4..4bc40610facf2 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ 
-49,6 +49,7 @@ import org.opensearch.common.lucene.search.Queries; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.bitset.BitsetFilterCache; @@ -75,6 +76,7 @@ import org.opensearch.search.fetch.subphase.ScriptFieldsContext; import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ScrollContext; import org.opensearch.search.internal.SearchContext; @@ -287,7 +289,7 @@ public void preProcess(boolean rewrite) { } } - if (sliceBuilder != null) { + if (sliceBuilder != null && scrollContext() != null) { int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll(); int numSlices = sliceBuilder.getMax(); if (numSlices > sliceLimit) { @@ -304,6 +306,22 @@ public void preProcess(boolean rewrite) { } } + if (sliceBuilder != null && readerContext != null && readerContext instanceof PitReaderContext) { + int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerPit(); + int numSlices = sliceBuilder.getMax(); + if (numSlices > sliceLimit) { + throw new OpenSearchRejectedExecutionException( + "The number of slices [" + + numSlices + + "] is too large. It must " + + "be less than [" + + sliceLimit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting." 
+ ); + } + } // initialize the filtering alias based on the provided filters try { final QueryBuilder queryBuilder = request.getAliasFilter().getQueryBuilder(); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 3b24d52bebe53..04fab85c163a9 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -41,9 +41,18 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.ListPitInfo; +import org.opensearch.action.search.PitSearchContextIdForNode; +import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.TransportActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; @@ -111,6 +120,7 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalScrollSearchRequest; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchContextId; @@ -135,6 +145,8 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; +import 
java.util.ArrayList; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -166,6 +178,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope, Property.Dynamic ); + /** + * This setting will help validate the max keep alive that can be set during creation or extension for a PIT reader context + */ + public static final Setting MAX_PIT_KEEPALIVE_SETTING = Setting.positiveTimeSetting( + "point_in_time.max_keep_alive", + timeValueHours(24), + Property.NodeScope, + Property.Dynamic + ); public static final Setting MAX_KEEPALIVE_SETTING = Setting.positiveTimeSetting( "search.max_keep_alive", timeValueHours(24), @@ -218,6 +239,19 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + /** + * This setting defines the maximum number of active PIT reader contexts in the node , since each PIT context + * has a resource cost attached to it. This setting is less than scroll since users are + * encouraged to share the PIT details. 
+ */ + public static final Setting MAX_OPEN_PIT_CONTEXT = Setting.intSetting( + "search.max_open_pit_context", + 300, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; @@ -243,6 +277,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile long maxKeepAlive; + private volatile long maxPitKeepAlive; + private volatile TimeValue defaultSearchTimeout; private volatile boolean defaultAllowPartialSearchResults; @@ -251,6 +287,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile int maxOpenScrollContext; + private volatile int maxOpenPitContext; + private final Cancellable keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -260,6 +298,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final MultiBucketConsumerService multiBucketConsumerService; private final AtomicInteger openScrollContexts = new AtomicInteger(); + private final AtomicInteger openPitContexts = new AtomicInteger(); private final String sessionId = UUIDs.randomBase64UUID(); private final Executor indexSearcherExecutor; @@ -293,6 +332,14 @@ public SearchService( TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings)); + setPitKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_PIT_KEEPALIVE_SETTING.get(settings)); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + DEFAULT_KEEPALIVE_SETTING, + MAX_PIT_KEEPALIVE_SETTING, + this::setPitKeepAlives, + this::validatePitKeepAlives + ); clusterService.getClusterSettings() .addSettingsUpdateConsumer(DEFAULT_KEEPALIVE_SETTING, MAX_KEEPALIVE_SETTING, this::setKeepAlives, this::validateKeepAlives); @@ -309,6 +356,9 @@ public SearchService( maxOpenScrollContext = 
MAX_OPEN_SCROLL_CONTEXT.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_SCROLL_CONTEXT, this::setMaxOpenScrollContext); + maxOpenPitContext = MAX_OPEN_PIT_CONTEXT.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_PIT_CONTEXT, this::setMaxOpenPitContext); + lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); } @@ -331,12 +381,38 @@ private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAli } } + /** + * Default keep alive search setting should be less than max PIT keep alive + */ + private void validatePitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + if (defaultKeepAlive.millis() > maxPitKeepAlive.millis()) { + throw new IllegalArgumentException( + "Default keep alive setting for request [" + + DEFAULT_KEEPALIVE_SETTING.getKey() + + "]" + + " should be smaller than max keep alive for PIT [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "], " + + "was (" + + defaultKeepAlive + + " > " + + maxPitKeepAlive + + ")" + ); + } + } + private void setKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { validateKeepAlives(defaultKeepAlive, maxKeepAlive); this.defaultKeepAlive = defaultKeepAlive.millis(); this.maxKeepAlive = maxKeepAlive.millis(); } + private void setPitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + validatePitKeepAlives(defaultKeepAlive, maxPitKeepAlive); + this.maxPitKeepAlive = maxPitKeepAlive.millis(); + } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { this.defaultSearchTimeout = defaultSearchTimeout; } @@ -353,6 +429,10 @@ private void setMaxOpenScrollContext(int maxOpenScrollContext) { this.maxOpenScrollContext = maxOpenScrollContext; } + private void setMaxOpenPitContext(int maxOpenPitContext) { + this.maxOpenPitContext = 
maxOpenPitContext; + } + private void setLowLevelCancellation(Boolean lowLevelCancellation) { this.lowLevelCancellation = lowLevelCancellation; } @@ -793,32 +873,101 @@ final ReaderContext createAndPutReaderContext( * Opens the reader context for given shardId. The newly opened reader context will be keep * until the {@code keepAlive} elapsed unless it is manually released. */ - public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { - checkKeepAliveLimit(keepAlive.millis()); + public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { + checkPitKeepAliveLimit(keepAlive.millis()); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard shard = indexService.getShard(shardId.id()); final SearchOperationListener searchOperationListener = shard.getSearchOperationListener(); shard.awaitShardSearchActive(ignored -> { Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; + Releasable decreasePitContexts = openPitContexts::decrementAndGet; try { + if (openPitContexts.incrementAndGet() > maxOpenPitContext) { + throw new OpenSearchRejectedExecutionException( + "Trying to create too many Point In Time contexts. Must be less than or equal to: [" + + maxOpenPitContext + + "]. " + + "This limit can be set by changing the [" + + MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting." 
+ ); + } searcherSupplier = shard.acquireSearcherSupplier(); final ShardSearchContextId id = new ShardSearchContextId(sessionId, idGenerator.incrementAndGet()); - readerContext = new ReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); + readerContext = new PitReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); final ReaderContext finalReaderContext = readerContext; searcherSupplier = null; // transfer ownership to reader context + searchOperationListener.onNewReaderContext(readerContext); - readerContext.addOnClose(() -> searchOperationListener.onFreeReaderContext(finalReaderContext)); + searchOperationListener.onNewPitContext(finalReaderContext); + + readerContext.addOnClose(() -> { + searchOperationListener.onFreeReaderContext(finalReaderContext); + searchOperationListener.onFreePitContext(finalReaderContext); + }); + readerContext.addOnClose(decreasePitContexts); + // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); } catch (Exception exc) { + Releasables.closeWhileHandlingException(decreasePitContexts); Releasables.closeWhileHandlingException(searcherSupplier, readerContext); listener.onFailure(exc); } }); } + /** + * Update PIT reader with pit id, keep alive and created time etc + */ + public void updatePitIdAndKeepAlive(UpdatePitContextRequest request, ActionListener listener) { + checkPitKeepAliveLimit(request.getKeepAlive()); + PitReaderContext readerContext = getPitReaderContext(request.getSearchContextId()); + if (readerContext == null) { + throw new SearchContextMissingException(request.getSearchContextId()); + } + Releasable updatePit = null; + try { + updatePit = readerContext.updatePitIdAndKeepAlive(request.getKeepAlive(), request.getPitId(), request.getCreationTime()); + listener.onResponse(new UpdatePitContextResponse(request.getPitId(), request.getCreationTime(), 
request.getKeepAlive())); + } catch (Exception e) { + freeReaderContext(readerContext.id()); + listener.onFailure(e); + } finally { + if (updatePit != null) { + updatePit.close(); + } + } + } + + /** + * Returns pit reader context based on ID + */ + public PitReaderContext getPitReaderContext(ShardSearchContextId id) { + ReaderContext context = activeReaders.get(id.getId()); + if (context instanceof PitReaderContext) { + return (PitReaderContext) context; + } + return null; + } + + /** + * This method returns all active PIT reader contexts + */ + public List getAllPITReaderContexts() { + final List pitContextsInfo = new ArrayList<>(); + for (ReaderContext ctx : activeReaders.values()) { + if (ctx instanceof PitReaderContext) { + final PitReaderContext context = (PitReaderContext) ctx; + ListPitInfo pitInfo = new ListPitInfo(context.getPitId(), context.getCreationTime(), context.getKeepAlive()); + pitContextsInfo.add(pitInfo); + } + } + return pitContextsInfo; + } + final SearchContext createContext( ReaderContext readerContext, ShardSearchRequest request, @@ -940,11 +1089,50 @@ public void freeAllScrollContexts() { } } + /** + * Free reader contexts if found + * @return response with list of PIT IDs deleted and if operation is successful + */ + public DeletePitResponse freeReaderContextsIfFound(List contextIds) { + List deleteResults = new ArrayList<>(); + for (PitSearchContextIdForNode contextId : contextIds) { + try { + if (getReaderContext(contextId.getSearchContextIdForNode().getSearchContextId()) != null) { + try (ReaderContext context = removeReaderContext(contextId.getSearchContextIdForNode().getSearchContextId().getId())) { + PitReaderContext pitReaderContext = (PitReaderContext) context; + if (context == null) { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + continue; + } + String pitId = pitReaderContext.getPitId(); + boolean success = context != null; + DeletePitInfo deletePitInfo = 
new DeletePitInfo(success, pitId); + deleteResults.add(deletePitInfo); + } + } else { + // For search context missing cases, mark the operation as succeeded + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + } + } catch (SearchContextMissingException e) { + // For search context missing cases, mark the operation as succeeded + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); + } + } + return new DeletePitResponse(deleteResults); + } + private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); } else if (request.keepAlive() != null) { - checkKeepAliveLimit(request.keepAlive().millis()); + if (getReaderContext(request.readerId()) instanceof PitReaderContext) { + checkPitKeepAliveLimit(request.keepAlive().millis()); + } else { + checkKeepAliveLimit(request.keepAlive().millis()); + } return request.keepAlive().getMillis(); } else { return request.readerId() == null ? defaultKeepAlive : -1; @@ -975,6 +1163,25 @@ private void checkKeepAliveLimit(long keepAlive) { } } + /** + * check if request keep alive is greater than max keep alive + */ + private void checkPitKeepAliveLimit(long keepAlive) { + if (keepAlive > maxPitKeepAlive) { + throw new IllegalArgumentException( + "Keep alive for request (" + + TimeValue.timeValueMillis(keepAlive) + + ") is too large. " + + "It must be less than (" + + TimeValue.timeValueMillis(maxPitKeepAlive) + + "). " + + "This limit can be set by changing the [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting." 
+ ); + } + } + private ActionListener wrapFailureListener(ActionListener listener, ReaderContext context, Releasable releasable) { return new ActionListener() { @Override @@ -1165,8 +1372,8 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } if (source.slice() != null) { - if (context.scrollContext() == null) { - throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context"); + if (context.scrollContext() == null && !(context.readerContext() instanceof PitReaderContext)) { + throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context or PIT context"); } context.sliceBuilder(source.slice()); } diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java new file mode 100644 index 0000000000000..b24a8a4172e29 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -0,0 +1,94 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.internal; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.Segment; +import org.opensearch.index.shard.IndexShard; + +import java.util.Collections; +import java.util.List; + +/** + * PIT reader context containing PIT specific information such as pit id, create time etc. 
+ */ +public class PitReaderContext extends ReaderContext { + + // Storing the encoded PIT ID as part of PIT reader context for use cases such as list pit API + private final SetOnce pitId = new SetOnce<>(); + // Creation time of PIT contexts which helps users to differentiate between multiple PIT reader contexts + private final SetOnce creationTime = new SetOnce<>(); + /** + * Shard routing at the time of creation of PIT Reader Context + */ + private final ShardRouting shardRouting; + + /** + * Encapsulates segments constituting the shard at the time of creation of PIT Reader Context. + */ + private final List segments; + + public PitReaderContext( + ShardSearchContextId id, + IndexService indexService, + IndexShard indexShard, + Engine.SearcherSupplier searcherSupplier, + long keepAliveInMillis, + boolean singleSession + ) { + super(id, indexService, indexShard, searcherSupplier, keepAliveInMillis, singleSession); + shardRouting = indexShard.routingEntry(); + segments = indexShard.segments(true); + } + + public String getPitId() { + return this.pitId.get(); + } + + public void setPitId(final String pitId) { + this.pitId.set(pitId); + } + + /** + * Returns a releasable to indicate that the caller has stopped using this reader. + * The pit id can be updated and time to live of the reader usage can be extended using the provided + * keepAliveInMillis. + */ + public Releasable updatePitIdAndKeepAlive(long keepAliveInMillis, String pitId, long createTime) { + getRefCounted().incRef(); + tryUpdateKeepAlive(keepAliveInMillis); + setPitId(pitId); + setCreationTime(createTime); + return Releasables.releaseOnce(() -> { + updateLastAccessTime(); + getRefCounted().decRef(); + }); + } + + public long getCreationTime() { + return this.creationTime.get() == null ? 
0 : this.creationTime.get(); + } + + public void setCreationTime(final long creationTime) { + this.creationTime.set(creationTime); + } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public List getSegments() { + return Collections.unmodifiableList(segments); + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index 5bcc491f4ffdb..898aa2e2c6745 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -105,10 +105,22 @@ public void validate(TransportRequest request) { indexShard.getSearchOperationListener().validateReaderContext(this, request); } - private long nowInMillis() { + protected AbstractRefCounted getRefCounted() { + return refCounted; + } + + protected void updateLastAccessTime() { + this.lastAccessTime.updateAndGet(curr -> Math.max(curr, nowInMillis())); + } + + protected long nowInMillis() { return indexShard.getThreadPool().relativeTimeInMillis(); } + public long getKeepAlive() { + return keepAlive.get(); + } + @Override public final void close() { if (closed.compareAndSet(false, true)) { @@ -140,7 +152,10 @@ public Engine.Searcher acquireSearcher(String source) { return searcherSupplier.acquireSearcher(source); } - private void tryUpdateKeepAlive(long keepAlive) { + /** + * Update keep alive if it is greater than current keep alive + */ + public void tryUpdateKeepAlive(long keepAlive) { this.keepAlive.updateAndGet(curr -> Math.max(curr, keepAlive)); } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java new file mode 100644 index 0000000000000..c03c27f7d7e4d --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -0,0 +1,535 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import 
java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; + +/** + * Functional tests for various methods in create pit controller. Covers update pit phase specifically since + * integration tests don't cover it. + */ +public class CreatePitControllerTests extends OpenSearchTestCase { + + DiscoveryNode node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + SearchResponse searchResponse = null; + ActionListener createPitListener = null; + ClusterService clusterServiceMock = null; + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + Settings settings = Settings.builder().put("node.name", CreatePitControllerTests.class.getSimpleName()).build(); + NodeClient client = new NodeClient(settings, threadPool); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + pitId = getPitId(); + namedWriteableRegistry = new 
NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + "transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + InternalSearchResponse response = new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ); + searchResponse = new SearchResponse( + response, + null, + 3, + 3, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + pitId + ); + createPitListener = new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for update pit is made to all nodes present as part of PIT ID returned from phase one of create pit + 
*/ + public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + /** + * Test if cleanup request is called + */ + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + + CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + + PitService 
pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + pitService + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + assertEquals(0, deleteNodesInvoked.size()); + } + } + } + + /** + * If create phase results in failure, update pit phase should not proceed and propagate the exception + */ + public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + 
public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + }; + + CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + pitService + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("on response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getCause().getMessage().contains("Exception occurred in phase 1")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onFailure(new Exception("Exception occurred in phase 1")); + latch.await(); + assertEquals(0, updateNodesInvoked.size()); + /** + * cleanup is not called on create pit phase one failure + */ + assertEquals(0, 
deleteNodesInvoked.size()); + } + } + } + + /** + * Testing that any update pit failures fails the request + */ + public void testUpdatePitFailureForNodeDrop() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + + updateNodesInvoked.add(connection.getNode()); + if ("node_3".equals(connection.getNode().getId())) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + } + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new 
SearchAsyncActionTests.MockConnection(node); + } + }; + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + pitService + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node 3 down")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, 
+ Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + CreatePitController controller = new CreatePitController( + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + pitService + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node down")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(request, task, createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + 
assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java new file mode 100644 index 0000000000000..3962a4a11fc90 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.junit.Assert; +import org.opensearch.Version; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.admin.indices.segments.PitSegmentsAction; +import org.opensearch.action.admin.indices.segments.PitSegmentsRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.ShardSearchContextId; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedList; +import 
java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.opensearch.test.OpenSearchTestCase.between; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.opensearch.test.OpenSearchTestCase.randomBoolean; + +/** + * Helper class for common pit tests functions + */ +public class PitTestsUtil { + private PitTestsUtil() {} + + public static QueryBuilder randomQueryBuilder() { + if (randomBoolean()) { + return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } else if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); + } + } + + public static String getPitId() { + AtomicArray array = new AtomicArray<>(3); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("a", 1), + null + ); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("b", 12), + null + ); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("c", 42), + null + ); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + + final Version version = Version.CURRENT; + final Map aliasFilters = new 
HashMap<>(); + for (SearchPhaseResult result : array.asList()) { + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if (randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } + } + return SearchContextId.encode(array.asList(), aliasFilters, version); + } + + public static void assertUsingGetAllPits(Client client, String id, long creationTime) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + assertTrue(getPitResponse.getPitInfos().get(0).getPitId().contains(id)); + Assert.assertEquals(getPitResponse.getPitInfos().get(0).getCreationTime(), creationTime); + } + + public static void assertGetAllPitsEmpty(Client client) throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + 
ClusterStateResponse clusterStateResponse = client.admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client.execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + Assert.assertEquals(0, getPitResponse.getPitInfos().size()); + } + + public static void assertSegments(boolean isEmpty, String index, long expectedShardSize, Client client, String pitId) { + PitSegmentsRequest pitSegmentsRequest; + pitSegmentsRequest = new PitSegmentsRequest(); + List pitIds = new ArrayList<>(); + pitIds.add(pitId); + pitSegmentsRequest.clearAndSetPitIds(pitIds); + IndicesSegmentResponse indicesSegmentResponse = client.execute(PitSegmentsAction.INSTANCE, pitSegmentsRequest).actionGet(); + assertTrue(indicesSegmentResponse.getShardFailures() == null || indicesSegmentResponse.getShardFailures().length == 0); + assertEquals(indicesSegmentResponse.getIndices().isEmpty(), isEmpty); + if (!isEmpty) { + assertTrue(indicesSegmentResponse.getIndices().get(index) != null); + assertTrue(indicesSegmentResponse.getIndices().get(index).getIndex().equalsIgnoreCase(index)); + assertEquals(expectedShardSize, indicesSegmentResponse.getIndices().get(index).getShards().size()); + } + } + + public static void assertSegments(boolean isEmpty, String index, long expectedShardSize, Client client) { + PitSegmentsRequest pitSegmentsRequest = new PitSegmentsRequest("_all"); + IndicesSegmentResponse indicesSegmentResponse = client.execute(PitSegmentsAction.INSTANCE, pitSegmentsRequest).actionGet(); + assertTrue(indicesSegmentResponse.getShardFailures() 
== null || indicesSegmentResponse.getShardFailures().length == 0); + assertEquals(indicesSegmentResponse.getIndices().isEmpty(), isEmpty); + if (!isEmpty) { + assertTrue(indicesSegmentResponse.getIndices().get(index) != null); + assertTrue(indicesSegmentResponse.getIndices().get(index).getIndex().equalsIgnoreCase(index)); + assertEquals(expectedShardSize, indicesSegmentResponse.getIndices().get(index).getShards().size()); + } + } + + public static void assertSegments(boolean isEmpty, Client client) { + assertSegments(isEmpty, "index", 2, client); + } + + public static void assertSegments(boolean isEmpty, Client client, String pitId) { + assertSegments(isEmpty, "index", 2, client, pitId); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java new file mode 100644 index 0000000000000..d6de562d616fa --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -0,0 +1,675 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.action.search; + +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilter; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.node.NodeClient; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; +import static org.opensearch.action.support.PlainActionFuture.newFuture; + +/** + * Functional tests for transport delete pit action + */ +public class TransportDeletePitActionTests extends OpenSearchTestCase { + DiscoveryNode 
node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + ClusterService clusterServiceMock = null; + Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); + private ThreadPool threadPool = new ThreadPool(settings); + NodeClient client = new NodeClient(settings, threadPool); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + pitId = getPitId(); + namedWriteableRegistry = new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + 
"transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for update pit is made to all nodes present as part of PIT ID returned from phase one of create pit + */ + public void testDeletePitSuccess() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + 
deleteNodesInvoked.add(connection.getNode()); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSuccessful()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + 
transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSuccessful()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeletePitWhenNodeIsDown() throws 
InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if ("node_3".equals(connection.getNode().getId())) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + 
action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitWhenAllNodesAreDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new 
DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitFailure() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if ("node_3".equals(connection.getNode().getId())) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, 
searchTransportService, transportService, client); + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenNodeIsDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId().equals("node_3")) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + 
t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenAllNodesAreDown() { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + 
try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContexts( + Transport.Connection connection, + List contextIds, + final ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitFailure() { + List deleteNodesInvoked = new 
CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + public void sendFreePITContexts( + Transport.Connection connection, + List contextId, + final ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId().equals("node_3")) { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 is down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + PitService pitService = new PitService(clusterServiceMock, searchTransportService, transportService, client) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + ListPitInfo listPitInfo = new ListPitInfo(getPitId(), 0, 0); + List list = new ArrayList<>(); + list.add(listPitInfo); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse( + cluster1Transport.getLocalDiscoNode(), + list + ); + List nodeList = new
ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("java.lang.Exception: node 3 is down")); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } +} diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 4682d35411b78..7d2d8e38d066e 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -45,9 +45,9 @@ public void testShardLevelSearchGroupStats() throws Exception { // let's create two dummy search stats with groups Map groupStats1 = new HashMap<>(); Map groupStats2 = new HashMap<>(); - groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); - SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); - SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); + groupStats2.put("group1", new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)); + SearchStats searchStats1 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats1); + SearchStats searchStats2 = new SearchStats(new Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), 0, groupStats2); // adding these two search stats and checking group stats are correct searchStats1.add(searchStats2); 
@@ -75,6 +75,9 @@ private static void assertStats(Stats stats, long equalTo) { assertEquals(equalTo, stats.getScrollCount()); assertEquals(equalTo, stats.getScrollTimeInMillis()); assertEquals(equalTo, stats.getScrollCurrent()); + assertEquals(equalTo, stats.getPitCount()); + assertEquals(equalTo, stats.getPitTimeInMillis()); + assertEquals(equalTo, stats.getPitCurrent()); assertEquals(equalTo, stats.getSuggestCount()); assertEquals(equalTo, stats.getSuggestTimeInMillis()); assertEquals(equalTo, stats.getSuggestCurrent()); diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index ed3aa19afa146..a8679a087216d 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -134,8 +134,8 @@ public void testBuildTable() { assertThat(row.get(3).value, equalTo(shardRouting.state())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); - assertThat(row.get(69).value, equalTo(shardStats.getDataPath())); - assertThat(row.get(70).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); + assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); } } } diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java new file mode 100644 index 0000000000000..ae7f795f57ee7 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -0,0 +1,620 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.hamcrest.Matchers; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitController; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitTestsUtil; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchSingleNodeTestCase; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.IndicesService; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; +import static org.opensearch.action.search.PitTestsUtil.assertSegments; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +/** + * Single node integration tests for various PIT use cases such as create pit, search etc + */ +public class CreatePitSingleNodeTests extends OpenSearchSingleNodeTestCase { + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Settings nodeSettings() { + // very frequent checks + return Settings.builder() + .put(super.nodeSettings()) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(1)) + .put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + + public void testCreatePITSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client(), pitResponse.getId()); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); + service.doClose(); // this kills the keep-alive reaper we have to reset the node 
after this test + assertSegments(true, client()); + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); + } + + public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + createIndex("index1", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse response = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), response.getId(), response.getCreationTime()); + assertSegments(false, client(), response.getId()); + assertEquals(4, response.getSuccessfulShards()); + assertEquals(4, service.getActiveContexts()); + + validatePitStats("index", 1, 0, 0); + validatePitStats("index1", 1, 0, 0); + service.doClose(); + assertSegments(true, client()); + validatePitStats("index", 0, 1, 0); + validatePitStats("index1", 0, 1, 0); + } + + public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = 
client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client(), pitResponse.getId()); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); + service.doClose(); + assertSegments(true, client()); + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); + } + + public void testCreatePITWithNonExistentIndex() { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue(ex.getMessage().contains("no such index [index1]")); + assertEquals(0, service.getActiveContexts()); + assertSegments(true, client()); + service.doClose(); + } + + public void testCreatePITOnCloseIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + 
client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().admin().indices().prepareClose("index").get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue(ex.getMessage().contains("IndexClosedException")); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); + service.doClose(); + } + + public void testPitSearchOnDeletedIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + client().admin().indices().prepareDelete("index").get(); + + IndexNotFoundException ex = expectThrows(IndexNotFoundException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.getMessage().contains("no such index [index]")); + SearchService service = getInstanceFromNode(SearchService.class); + PitTestsUtil.assertGetAllPitsEmpty(client()); + assertEquals(0, service.getActiveContexts()); + assertSegments(true, 
client()); + service.doClose(); + } + + public void testInvalidPitId() { + createIndex("idx"); + String id = "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"; + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(id).setKeepAlive(TimeValue.timeValueDays(1))) + .get() + ); + assertEquals("invalid id: [" + id + "]", e.getMessage()); + } + + public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client(), pitResponse.getId()); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); + + client().admin().indices().prepareClose("index").get(); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); + // PIT reader contexts are lost after close, verifying 
it with open index api + client().admin().indices().prepareOpen("index").get(); + ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + for (int i = 0; i < SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." 
+ ) + ); + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts, 0); + } + + public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + List pitIds = new ArrayList<>(); + + try { + for (int i = 0; i < 1000; i++) { + CreatePitResponse cpr = client().execute(CreatePitAction.INSTANCE, request).actionGet(); + if (cpr.getId() != null) pitIds.add(cpr.getId()); + } + } catch (Exception ex) { + assertTrue( + ((SearchPhaseExecutionException) ex).getDetailedMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." + ) + ); + } + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + validatePitStats("index", maxPitContexts, 0, 0); + // deleteall + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[pitIds.size()])); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, maxPitContexts, 0); + client().execute(CreatePitAction.INSTANCE, request).get(); + validatePitStats("index", 1, maxPitContexts, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts + 1, 0); + } + + public void testOpenPitContextsConcurrently() throws Exception { + createIndex("index"); + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + Thread[] threads = new Thread[randomIntBetween(2, 8)]; + CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (;;) { + try { + client().execute(CreatePitAction.INSTANCE, request).get(); + } catch (ExecutionException e) { + assertTrue( + e.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [" + + SearchService.MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting." 
+ ) + ); + return; + } + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(service.getActiveContexts(), equalTo(maxPitContexts)); + validatePitStats("index", maxPitContexts, 0, 0); + service.doClose(); + validatePitStats("index", 0, maxPitContexts, 0); + } + + /** + * Point in time search should return the same results as creation time and index updates should not affect the PIT search results + */ + public void testPitAfterUpdateIndex() throws Exception { + client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 5)).get(); + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + + for (int i = 0; i < 50; i++) { + client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("user", "foobar") + .field("postDate", System.currentTimeMillis()) + .field("message", "test") + .endObject() + ) + .get(); + } + client().admin().indices().prepareRefresh().get(); + + // create pit + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(2), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + SearchService service = getInstanceFromNode(SearchService.class); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(matchAllQuery()) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + 
.setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + + // update index + SearchResponse searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("user:foobar")) + .setSize(50) + .addSort("postDate", SortOrder.ASC) + .get(); + try { + do { + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + Map map = searchHit.getSourceAsMap(); + map.put("message", "update"); + client().prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); + } + searchResponse = client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get(); + + } while (searchResponse.getHits().getHits().length > 0); + + client().admin().indices().prepareRefresh().get(); + assertThat( + client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * assert without point in time + */ + + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + 
client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * using point in time id will have the same search results as ones before update + */ + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + validatePitStats("test", 1, 0, 0); + } finally { + service.doClose(); + assertEquals(0, service.getActiveContexts()); + validatePitStats("test", 0, 1, 0); + PitTestsUtil.assertGetAllPitsEmpty(client()); + assertSegments(true, client()); + } + } + + public void testConcurrentSearches() throws Exception { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] 
{ "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client(), pitResponse.getId()); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 50; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + validatePitStats("index", 1, 0, 0); + validatePitStats("index", 1, 0, 1); + service.doClose(); + assertEquals(0, service.getActiveContexts()); + validatePitStats("index", 0, 1, 0); + validatePitStats("index", 0, 1, 1); + PitTestsUtil.assertGetAllPitsEmpty(client()); + } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, + InterruptedException { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index)); + IndexShard indexShard = indexService.getShard(shardId); + assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent()); + assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount()); + } +} diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java 
b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 79184497b201c..96a4d9ad1d8d9 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -52,6 +52,7 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.cache.IndexCache; @@ -67,6 +68,7 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; @@ -134,10 +136,12 @@ public void testPreProcess() throws Exception { int maxResultWindow = randomIntBetween(50, 100); int maxRescoreWindow = randomIntBetween(50, 100); int maxSlicesPerScroll = randomIntBetween(50, 100); + int maxSlicesPerPit = randomIntBetween(50, 100); Settings settings = Settings.builder() .put("index.max_result_window", maxResultWindow) .put("index.max_slices_per_scroll", maxSlicesPerScroll) .put("index.max_rescore_window", maxRescoreWindow) + .put("index.max_slices_per_pit", maxSlicesPerPit) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) @@ -300,13 +304,13 @@ protected Engine.Searcher acquireSearcherInternal(String source) { ); readerContext.close(); - readerContext = new ReaderContext( + readerContext = new LegacyReaderContext( newContextId(), indexService, indexShard, 
searcherSupplier.get(), - randomNonNegativeLong(), - false + shardSearchRequest, + randomNonNegativeLong() ); // rescore is null but sliceBuilder is not null DefaultSearchContext context2 = new DefaultSearchContext( @@ -404,6 +408,52 @@ protected Engine.Searcher acquireSearcherInternal(String source) { assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); readerContext.close(); + + ReaderContext pitReaderContext = new PitReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + 1000, + true + ); + DefaultSearchContext context5 = new DefaultSearchContext( + pitReaderContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + timeout, + null, + false, + Version.CURRENT, + false, + executor + ); + int numSlicesForPit = maxSlicesPerPit + randomIntBetween(1, 100); + when(sliceBuilder.getMax()).thenReturn(numSlicesForPit); + context5.sliceBuilder(sliceBuilder); + + OpenSearchRejectedExecutionException exception1 = expectThrows( + OpenSearchRejectedExecutionException.class, + () -> context5.preProcess(false) + ); + assertThat( + exception1.getMessage(), + equalTo( + "The number of slices [" + + numSlicesForPit + + "] is too large. It must " + + "be less than [" + + maxSlicesPerPit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting." + ) + ); + pitReaderContext.close(); + threadPool.shutdown(); } } diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java new file mode 100644 index 0000000000000..e69b2cc523638 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -0,0 +1,351 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; + +/** + * Multi node integration tests for delete PIT use cases + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class DeletePitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", 
Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + private CreatePitResponse createPitOnIndex(String index) throws ExecutionException, InterruptedException { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { index }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + return execute.get(); + } + + public void testDeletePit() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + validatePitStats("index", 10, 0); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + assertEquals(2, deletePITResponse.getDeletePitResults().size()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, 10); + /** + * Checking deleting the same PIT id again results in succeeded + */ + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : 
deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + } + + public void testDeletePitWithValidAndDeletedIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + validatePitStats("index", 5, 0); + + /** + * Delete Pit #1 + */ + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, 5); + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + validatePitStats("index", 5, 5); + /** + * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + */ + deletePITRequest = new DeletePitRequest(pitIds); + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, 10); + } + + public void testDeletePitWithValidAndInvalidIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); 
+ CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + pitIds.add("nondecodableid"); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + Exception e = assertThrows(ExecutionException.class, () -> deleteExecute.get()); + assertThat(e.getMessage(), containsString("invalid id")); + } + + public void testDeleteAllPits() throws Exception { + createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + createPitOnIndex("index1"); + validatePitStats("index", 5, 0); + validatePitStats("index1", 5, 0); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSuccessful()); + } + validatePitStats("index", 0, 5); + validatePitStats("index1", 0, 5); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeletePitWhileNodeDrop() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + CreatePitResponse pitResponse1 = createPitOnIndex("index1"); + pitIds.add(pitResponse1.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + } + } catch (Exception e) { + throw new AssertionError(e); + } + return super.onNodeStopped(nodeName); + } + }); + + ensureGreen(); + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteAllPitsWhileNodeDrop() throws Exception { + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + createPitOnIndex("index1"); + ensureGreen(); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Node not connected")); + } + return super.onNodeStopped(nodeName); + } + }); + ensureGreen(); + /** + * When we invoke delete again, returns success as all readers are cleared. 
 (Delete all on node which is Up and + once the node restarts, all active contexts are cleared in the node ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertEquals(0, deletePITResponse.getDeletePitResults().size()); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteWhileSearch() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + final AtomicBoolean deleted = new AtomicBoolean(false); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 30; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + /** + * assert for exception once delete pit goes through. throw error in case of any exception before that. 
+ */ + if (deleted.get() == true) { + if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + return; + } + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + deleted.set(true); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSuccessful()); + } + + for (Thread thread : threads) { + thread.join(); + } + } + + public void testConcurrentDeletes() throws InterruptedException, ExecutionException { + CreatePitResponse pitResponse = createPitOnIndex("index"); + ensureGreen(); + int concurrentRuns = randomIntBetween(20, 50); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + AtomicInteger numDeleteAcknowledged = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(DeletePitMultiNodeTests.class.getName()); + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + for (int i = 0; i < concurrentRuns; i++) { + Runnable thread = () -> { + logger.info("Triggering pit delete --->"); + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numDeleteAcknowledged.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + 
operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numDeleteAcknowledged.get()); + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount) throws ExecutionException, + InterruptedException { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices(index); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent(); + long pitCount = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCount(); + assertEquals(expectedPitCurrent, pitCurrent); + assertEquals(expectedPitCount, pitCount); + } + +} diff --git a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java new file mode 100644 index 0000000000000..5944e2a35b14a --- /dev/null +++ b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; + +public class DeletePitResponseTests extends OpenSearchTestCase { + + public void testDeletePitResponseToXContent() throws IOException { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitInfoList); + + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + deletePitResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + assertEquals(true, deletePitResponse.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertEquals(true, deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + } + + public void testDeletePitResponseToAndFromXContent() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + DeletePitResponse originalResponse = createDeletePitResponseTestItem(); + ; + BytesReference originalBytes = toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + DeletePitResponse parsedResponse; + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + parsedResponse = DeletePitResponse.fromXContent(parser); + } + 
assertEquals( + originalResponse.getDeletePitResults().get(0).isSuccessful(), + parsedResponse.getDeletePitResults().get(0).isSuccessful() + ); + assertEquals(originalResponse.getDeletePitResults().get(0).getPitId(), parsedResponse.getDeletePitResults().get(0).getPitId()); + BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, xContentType, randomBoolean()); + assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + } + + private static DeletePitResponse createDeletePitResponseTestItem() { + DeletePitInfo deletePitInfo = new DeletePitInfo(randomBoolean(), "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + return new DeletePitResponse(deletePitInfoList); + } +} diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java new file mode 100644 index 0000000000000..a23e4141a78e4 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -0,0 +1,478 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.admin.cluster.state.ClusterStateRequest; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.GetAllPitNodesRequest; +import org.opensearch.action.search.GetAllPitNodesResponse; +import org.opensearch.action.search.GetAllPitsAction; +import org.opensearch.action.search.PitTestsUtil; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static 
org.hamcrest.Matchers.containsString; +import static org.opensearch.action.search.PitTestsUtil.assertSegments; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Multi node integration tests for PIT creation and search operation with PIT ID. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class PitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + public void testPit() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(2, searchResponse.getSuccessfulShards()); + assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 2, 2); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, client(), pitResponse.getId()); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), false); + request.setIndices(new 
String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); + assertTrue(ex.getMessage().contains("Partial shards failure")); + validatePitStats("index", 0, 0); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + assertSegments(false, "index", 1, client(), pitResponse.getId()); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(2, pitResponse.getTotalShards()); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDrop() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = 
client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getFailedShards()); + assertEquals(0, searchResponse.getSkippedShards()); + assertEquals(2, searchResponse.getTotalShards()); + validatePitStats("index", 1, 1); + PitTestsUtil.assertUsingGetAllPits(client(), pitResponse.getId(), pitResponse.getCreationTime()); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDropWithPartialSearchResultsFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .setAllowPartialSearchResults(false) + .execute(); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Partial shards failure")); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitInvalidDefaultKeepAlive() { + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + 
.setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (2m > 1m)")); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m").put("point_in_time.max_keep_alive", "5m")) + .get() + ); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "2m")) + .get() + ); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "2m")) + .get() + ); + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (3m > 2m)")); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "1m")) + .get() + ); + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("point_in_time.max_keep_alive", "30s")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (1m > 30s)")); + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testConcurrentCreates() throws InterruptedException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + + int concurrentRuns = randomIntBetween(20, 50); + AtomicInteger 
numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(DeletePitMultiNodeTests.class.getName()); + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + Set createSet = new HashSet<>(); + for (int i = 0; i < concurrentRuns; i++) { + Runnable thread = () -> { + logger.info("Triggering pit create --->"); + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPitResponse) { + if (createSet.add(createPitResponse.getId())) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(CreatePitAction.INSTANCE, createPitRequest, listener); + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + + public void testConcurrentCreatesWithDeletes() throws InterruptedException, ExecutionException { + CreatePitRequest createPitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + List pitIds = new ArrayList<>(); + String id = client().execute(CreatePitAction.INSTANCE, createPitRequest).get().getId(); + pitIds.add(id); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + Set createSet = new HashSet<>(); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); + int concurrentRuns = randomIntBetween(20, 50); + + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new 
CountDownLatch(concurrentRuns); + long randomDeleteThread = randomLongBetween(0, concurrentRuns - 1); + for (int i = 0; i < concurrentRuns; i++) { + int currentThreadIteration = i; + Runnable thread = () -> { + if (currentThreadIteration == randomDeleteThread) { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPitResponse) { + if (createSet.add(createPitResponse.getId())) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(CreatePitAction.INSTANCE, createPitRequest, listener); + } else { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedOpenContexts) throws ExecutionException, + InterruptedException { + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest(); + indicesStatsRequest.indices("index"); + indicesStatsRequest.all(); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).get(); + long pitCurrent = indicesStatsResponse.getIndex(index).getTotal().search.getTotal().getPitCurrent(); + long openContexts = 
indicesStatsResponse.getIndex(index).getTotal().search.getOpenContexts(); + assertEquals(expectedPitCurrent, pitCurrent); + assertEquals(expectedOpenContexts, openContexts); + } + + public void testGetAllPits() throws Exception { + client().admin().indices().prepareCreate("index1").get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + CreatePitResponse pitResponse1 = client().execute(CreatePitAction.INSTANCE, request).get(); + CreatePitResponse pitResponse2 = client().execute(CreatePitAction.INSTANCE, request).get(); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(disNodesArr); + ActionFuture execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + assertEquals(3, getPitResponse.getPitInfos().size()); + List resultPitIds = getPitResponse.getPitInfos().stream().map(p -> p.getPitId()).collect(Collectors.toList()); + // asserting that we get all unique PIT IDs + Assert.assertTrue(resultPitIds.contains(pitResponse.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse1.getId())); + Assert.assertTrue(resultPitIds.contains(pitResponse2.getId())); + 
client().admin().indices().prepareDelete("index1").get(); + } + + public void testGetAllPitsDuringNodeDrop() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute1 = client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest); + GetAllPitNodesResponse getPitResponse = execute1.get(); + // we still get a pit id from the data node which is up + assertEquals(1, getPitResponse.getPitInfos().size()); + // failure for node drop + assertEquals(1, getPitResponse.failures().size()); + assertTrue(getPitResponse.failures().get(0).getMessage().contains("Failed node")); + return super.onNodeStopped(nodeName); + } + }); + } + + private DiscoveryNode[] getDiscoveryNodes() throws ExecutionException, InterruptedException { + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.local(false); + clusterStateRequest.clear().nodes(true).routingTable(true).indices("*"); + ClusterStateResponse clusterStateResponse = client().admin().cluster().state(clusterStateRequest).get(); + final List nodes = new LinkedList<>(); + for (ObjectCursor cursor : clusterStateResponse.getState().nodes().getDataNodes().values()) { + DiscoveryNode node = cursor.value; + nodes.add(node); + } + DiscoveryNode[] disNodesArr = new DiscoveryNode[nodes.size()]; + nodes.toArray(disNodesArr); + return disNodesArr; + } + + public void testConcurrentGetWithDeletes() throws InterruptedException, ExecutionException { + CreatePitRequest createPitRequest = new 
CreatePitRequest(TimeValue.timeValueDays(1), true); + createPitRequest.setIndices(new String[] { "index" }); + List pitIds = new ArrayList<>(); + String id = client().execute(CreatePitAction.INSTANCE, createPitRequest).get().getId(); + pitIds.add(id); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + GetAllPitNodesRequest getAllPITNodesRequest = new GetAllPitNodesRequest(getDiscoveryNodes()); + AtomicInteger numSuccess = new AtomicInteger(); + TestThreadPool testThreadPool = null; + try { + testThreadPool = new TestThreadPool(PitMultiNodeTests.class.getName()); + int concurrentRuns = randomIntBetween(20, 50); + + List operationThreads = new ArrayList<>(); + CountDownLatch countDownLatch = new CountDownLatch(concurrentRuns); + long randomDeleteThread = randomLongBetween(0, concurrentRuns - 1); + for (int i = 0; i < concurrentRuns; i++) { + int currentThreadIteration = i; + Runnable thread = () -> { + if (currentThreadIteration == randomDeleteThread) { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(GetAllPitNodesResponse getAllPitNodesResponse) { + if (getAllPitNodesResponse.failures().isEmpty()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(GetAllPitsAction.INSTANCE, getAllPITNodesRequest, listener); + } else { + LatchedActionListener listener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + if (deletePitResponse.getDeletePitResults().get(0).isSuccessful()) { + numSuccess.incrementAndGet(); + } + } + + @Override + public void onFailure(Exception e) {} + }, countDownLatch); + client().execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + }; + operationThreads.add(thread); + } + TestThreadPool finalTestThreadPool = testThreadPool; + operationThreads.forEach(runnable -> 
finalTestThreadPool.executor("generic").execute(runnable)); + countDownLatch.await(); + assertEquals(concurrentRuns, numSuccess.get()); + + } finally { + ThreadPool.terminate(testThreadPool, 500, TimeUnit.MILLISECONDS); + } + } + +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 4e342875e4599..1f824d40eb638 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -41,11 +41,16 @@ import org.opensearch.action.OriginalIndices; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitSearchContextIdForNode; +import org.opensearch.action.search.SearchContextIdForNode; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; @@ -1406,9 +1411,10 @@ public void testOpenReaderContext() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); - searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); future.actionGet(); 
assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); assertTrue(searchService.freeReaderContext(future.actionGet())); } @@ -1422,4 +1428,141 @@ private ReaderContext createReaderContext(IndexService indexService, IndexShard false ); } + + public void testDeletePitReaderContext() throws ExecutionException, InterruptedException { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + List contextIds = new ArrayList<>(); + ShardSearchContextId shardSearchContextId = future.actionGet(); + PitSearchContextIdForNode pitSearchContextIdForNode = new PitSearchContextIdForNode( + "1", + new SearchContextIdForNode(null, "node1", shardSearchContextId) + ); + contextIds.add(pitSearchContextIdForNode); + + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); + DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + // assert true for reader context not found + deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSuccessful()); + // adding this assert to showcase behavior difference + assertFalse(searchService.freeReaderContext(future.actionGet())); + validatePitStats("index", 0, 1, 0); + } + + public void testPitContextMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, 
() -> { + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueHours(25), future); + future.actionGet(); + }); + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). " + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); + } + + public void testUpdatePitId() throws ExecutionException, InterruptedException { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + PlainActionFuture updateFuture = new PlainActionFuture<>(); + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueMinutes(between(1, 10)).millis(), + System.currentTimeMillis() + ); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + UpdatePitContextResponse updateResponse = updateFuture.actionGet(); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertTrue(updateResponse.getCreationTime() == updateRequest.getCreationTime()); + assertTrue(updateResponse.getKeepAlive() == updateRequest.getKeepAlive()); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + validatePitStats("index", 1, 0, 0); + assertTrue(searchService.freeReaderContext(future.actionGet())); + validatePitStats("index", 0, 1, 0); + } + + public void 
testUpdatePitIdMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(25).millis(), + System.currentTimeMillis() + ); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). " + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testUpdatePitIdWithInvalidReaderId() { + SearchService searchService = getInstanceFromNode(SearchService.class); + ShardSearchContextId id = new ShardSearchContextId("session", 9); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(23).millis(), + System.currentTimeMillis() + ); + SearchContextMissingException ex = expectThrows(SearchContextMissingException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals("No search context found for id [" + id.getId() + "]", ex.getMessage()); + 
assertThat(searchService.getActiveContexts(), equalTo(0)); + assertThat(searchService.getAllPITReaderContexts().size(), equalTo(0)); + } + + public void validatePitStats(String index, long expectedPitCurrent, long expectedPitCount, int shardId) throws ExecutionException, + InterruptedException { + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex(index)); + IndexShard indexShard = indexService.getShard(shardId); + assertEquals(expectedPitCurrent, indexShard.searchStats().getTotal().getPitCurrent()); + assertEquals(expectedPitCount, indexShard.searchStats().getTotal().getPitCount()); + } } diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new 
NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..0bfa16aafe1e3 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new 
FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } + } + + public 
void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +}